From d38ceaf99ed015f2a0b9af3499791bd3a3daae21 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 20 Apr 2015 16:55:21 -0400 Subject: drm/amdgpu: add core driver (v4) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds the non-asic specific core driver code. v2: remove extra kconfig option v3: implement minor fixes from Fengguang Wu v4: fix cast in amdgpu_ucode.c Acked-by: Christian König Acked-by: Jammy Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1971 ++++++++++++++++++++++++++++ 1 file changed, 1971 insertions(+) create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c new file mode 100644 index 000000000000..cd4bb90fa85c --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -0,0 +1,1971 @@ +/* + * Copyright 2008 Advanced Micro Devices, Inc. + * Copyright 2008 Red Hat Inc. + * Copyright 2009 Jerome Glisse. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Dave Airlie + * Alex Deucher + * Jerome Glisse + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "amdgpu.h" +#include "amdgpu_i2c.h" +#include "atom.h" +#include "amdgpu_atombios.h" +#include "bif/bif_4_1_d.h" + +static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev); +static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev); + +static const char *amdgpu_asic_name[] = { + "BONAIRE", + "KAVERI", + "KABINI", + "HAWAII", + "MULLINS", + "TOPAZ", + "TONGA", + "CARRIZO", + "LAST", +}; + +bool amdgpu_device_is_px(struct drm_device *dev) +{ + struct amdgpu_device *adev = dev->dev_private; + + if (adev->flags & AMDGPU_IS_PX) + return true; + return false; +} + +/* + * MMIO register access helper functions. 
+ */ +uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg, + bool always_indirect) +{ + if ((reg * 4) < adev->rmmio_size && !always_indirect) + return readl(((void __iomem *)adev->rmmio) + (reg * 4)); + else { + unsigned long flags; + uint32_t ret; + + spin_lock_irqsave(&adev->mmio_idx_lock, flags); + writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4)); + ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4)); + spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); + + return ret; + } +} + +void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, + bool always_indirect) +{ + if ((reg * 4) < adev->rmmio_size && !always_indirect) + writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); + else { + unsigned long flags; + + spin_lock_irqsave(&adev->mmio_idx_lock, flags); + writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4)); + writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4)); + spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); + } +} + +u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg) +{ + if ((reg * 4) < adev->rio_mem_size) + return ioread32(adev->rio_mem + (reg * 4)); + else { + iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4)); + return ioread32(adev->rio_mem + (mmMM_DATA * 4)); + } +} + +void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v) +{ + + if ((reg * 4) < adev->rio_mem_size) + iowrite32(v, adev->rio_mem + (reg * 4)); + else { + iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4)); + iowrite32(v, adev->rio_mem + (mmMM_DATA * 4)); + } +} + +/** + * amdgpu_mm_rdoorbell - read a doorbell dword + * + * @adev: amdgpu_device pointer + * @index: doorbell index + * + * Returns the value in the doorbell aperture at the + * requested doorbell index (CIK). + */ +u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index) +{ + if (index < adev->doorbell.num_doorbells) { + return readl(adev->doorbell.ptr + index); + } else { + DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index); + return 0; + } +} + +/** + * amdgpu_mm_wdoorbell - write a doorbell dword + * + * @adev: amdgpu_device pointer + * @index: doorbell index + * @v: value to write + * + * Writes @v to the doorbell aperture at the + * requested doorbell index (CIK). + */ +void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v) +{ + if (index < adev->doorbell.num_doorbells) { + writel(v, adev->doorbell.ptr + index); + } else { + DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index); + } +} + +/** + * amdgpu_invalid_rreg - dummy reg read function + * + * @adev: amdgpu device pointer + * @reg: offset of register + * + * Dummy register read function. Used for register blocks + * that certain asics don't have (all asics). + * Returns the value in the register. + */ +static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg) +{ + DRM_ERROR("Invalid callback to read register 0x%04X\n", reg); + BUG(); + return 0; +} + +/** + * amdgpu_invalid_wreg - dummy reg write function + * + * @adev: amdgpu device pointer + * @reg: offset of register + * @v: value to write to the register + * + * Dummy register read function. Used for register blocks + * that certain asics don't have (all asics). 
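/*
 * Editor's note: a minimal usage sketch for the MMIO helpers above. Real
 * callers normally go through the RREG32()/WREG32() macro family in
 * amdgpu.h, which wraps amdgpu_mm_rreg()/amdgpu_mm_wreg(); the function
 * below exists only to illustrate the always_indirect path and is not part
 * of the patch.
 */
static u32 example_mmio_read(struct amdgpu_device *adev)
{
	/* offset inside the mapped BAR: plain readl() on adev->rmmio */
	u32 direct = amdgpu_mm_rreg(adev, mmCONFIG_MEMSIZE, false);

	/* always_indirect forces the mmMM_INDEX/mmMM_DATA window, the same
	 * path taken automatically for offsets beyond adev->rmmio_size */
	u32 indirect = amdgpu_mm_rreg(adev, mmCONFIG_MEMSIZE, true);

	return (direct == indirect) ? direct : 0;
}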
+ */ +static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v) +{ + DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n", + reg, v); + BUG(); +} + +/** + * amdgpu_block_invalid_rreg - dummy reg read function + * + * @adev: amdgpu device pointer + * @block: offset of instance + * @reg: offset of register + * + * Dummy register read function. Used for register blocks + * that certain asics don't have (all asics). + * Returns the value in the register. + */ +static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev, + uint32_t block, uint32_t reg) +{ + DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n", + reg, block); + BUG(); + return 0; +} + +/** + * amdgpu_block_invalid_wreg - dummy reg write function + * + * @adev: amdgpu device pointer + * @block: offset of instance + * @reg: offset of register + * @v: value to write to the register + * + * Dummy register read function. Used for register blocks + * that certain asics don't have (all asics). + */ +static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev, + uint32_t block, + uint32_t reg, uint32_t v) +{ + DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n", + reg, block, v); + BUG(); +} + +static int amdgpu_vram_scratch_init(struct amdgpu_device *adev) +{ + int r; + + if (adev->vram_scratch.robj == NULL) { + r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE, + PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 0, + NULL, &adev->vram_scratch.robj); + if (r) { + return r; + } + } + + r = amdgpu_bo_reserve(adev->vram_scratch.robj, false); + if (unlikely(r != 0)) + return r; + r = amdgpu_bo_pin(adev->vram_scratch.robj, + AMDGPU_GEM_DOMAIN_VRAM, &adev->vram_scratch.gpu_addr); + if (r) { + amdgpu_bo_unreserve(adev->vram_scratch.robj); + return r; + } + r = amdgpu_bo_kmap(adev->vram_scratch.robj, + (void **)&adev->vram_scratch.ptr); + if (r) + amdgpu_bo_unpin(adev->vram_scratch.robj); + amdgpu_bo_unreserve(adev->vram_scratch.robj); + + return r; +} + +static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev) +{ + int r; + + if (adev->vram_scratch.robj == NULL) { + return; + } + r = amdgpu_bo_reserve(adev->vram_scratch.robj, false); + if (likely(r == 0)) { + amdgpu_bo_kunmap(adev->vram_scratch.robj); + amdgpu_bo_unpin(adev->vram_scratch.robj); + amdgpu_bo_unreserve(adev->vram_scratch.robj); + } + amdgpu_bo_unref(&adev->vram_scratch.robj); +} + +/** + * amdgpu_program_register_sequence - program an array of registers. + * + * @adev: amdgpu_device pointer + * @registers: pointer to the register array + * @array_size: size of the register array + * + * Programs an array or registers with and and or masks. + * This is a helper for setting golden registers. + */ +void amdgpu_program_register_sequence(struct amdgpu_device *adev, + const u32 *registers, + const u32 array_size) +{ + u32 tmp, reg, and_mask, or_mask; + int i; + + if (array_size % 3) + return; + + for (i = 0; i < array_size; i +=3) { + reg = registers[i + 0]; + and_mask = registers[i + 1]; + or_mask = registers[i + 2]; + + if (and_mask == 0xffffffff) { + tmp = or_mask; + } else { + tmp = RREG32(reg); + tmp &= ~and_mask; + tmp |= or_mask; + } + WREG32(reg, tmp); + } +} + +void amdgpu_pci_config_reset(struct amdgpu_device *adev) +{ + pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA); +} + +/* + * GPU doorbell aperture helpers function. + */ +/** + * amdgpu_doorbell_init - Init doorbell driver information. 
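/*
 * Editor's note: sketch of the flat (reg, and_mask, or_mask) triplet layout
 * consumed by amdgpu_program_register_sequence() above. The offsets and
 * masks here are placeholders for illustration only; the real golden
 * register tables live in the per-ASIC files.
 */
static const u32 example_golden_settings[] =
{
	/* reg      and_mask     or_mask */
	0x2a00, 0xffffffff, 0x00000001,	/* and_mask of ~0: or_mask written as-is */
	0x2a04, 0x0000000f, 0x00000004,	/* read-modify-write of the low nibble */
};

static void example_program_golden(struct amdgpu_device *adev)
{
	amdgpu_program_register_sequence(adev, example_golden_settings,
					 (const u32)ARRAY_SIZE(example_golden_settings));
}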
+ * + * @adev: amdgpu_device pointer + * + * Init doorbell driver information (CIK) + * Returns 0 on success, error on failure. + */ +static int amdgpu_doorbell_init(struct amdgpu_device *adev) +{ + /* doorbell bar mapping */ + adev->doorbell.base = pci_resource_start(adev->pdev, 2); + adev->doorbell.size = pci_resource_len(adev->pdev, 2); + + adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32), + AMDGPU_DOORBELL_MAX_ASSIGNMENT+1); + if (adev->doorbell.num_doorbells == 0) + return -EINVAL; + + adev->doorbell.ptr = ioremap(adev->doorbell.base, adev->doorbell.num_doorbells * sizeof(u32)); + if (adev->doorbell.ptr == NULL) { + return -ENOMEM; + } + DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)adev->doorbell.base); + DRM_INFO("doorbell mmio size: %u\n", (unsigned)adev->doorbell.size); + + return 0; +} + +/** + * amdgpu_doorbell_fini - Tear down doorbell driver information. + * + * @adev: amdgpu_device pointer + * + * Tear down doorbell driver information (CIK) + */ +static void amdgpu_doorbell_fini(struct amdgpu_device *adev) +{ + iounmap(adev->doorbell.ptr); + adev->doorbell.ptr = NULL; +} + +/** + * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to + * setup amdkfd + * + * @adev: amdgpu_device pointer + * @aperture_base: output returning doorbell aperture base physical address + * @aperture_size: output returning doorbell aperture size in bytes + * @start_offset: output returning # of doorbell bytes reserved for amdgpu. + * + * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up, + * takes doorbells required for its own rings and reports the setup to amdkfd. + * amdgpu reserved doorbells are at the start of the doorbell aperture. + */ +void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev, + phys_addr_t *aperture_base, + size_t *aperture_size, + size_t *start_offset) +{ + /* + * The first num_doorbells are used by amdgpu. + * amdkfd takes whatever's left in the aperture. + */ + if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) { + *aperture_base = adev->doorbell.base; + *aperture_size = adev->doorbell.size; + *start_offset = adev->doorbell.num_doorbells * sizeof(u32); + } else { + *aperture_base = 0; + *aperture_size = 0; + *start_offset = 0; + } +} + +/* + * amdgpu_wb_*() + * Writeback is the the method by which the the GPU updates special pages + * in memory with the status of certain GPU events (fences, ring pointers, + * etc.). + */ + +/** + * amdgpu_wb_fini - Disable Writeback and free memory + * + * @adev: amdgpu_device pointer + * + * Disables Writeback and frees the Writeback memory (all asics). + * Used at driver shutdown. + */ +static void amdgpu_wb_fini(struct amdgpu_device *adev) +{ + if (adev->wb.wb_obj) { + if (!amdgpu_bo_reserve(adev->wb.wb_obj, false)) { + amdgpu_bo_kunmap(adev->wb.wb_obj); + amdgpu_bo_unpin(adev->wb.wb_obj); + amdgpu_bo_unreserve(adev->wb.wb_obj); + } + amdgpu_bo_unref(&adev->wb.wb_obj); + adev->wb.wb = NULL; + adev->wb.wb_obj = NULL; + } +} + +/** + * amdgpu_wb_init- Init Writeback driver info and allocate memory + * + * @adev: amdgpu_device pointer + * + * Disables Writeback and frees the Writeback memory (all asics). + * Used at driver startup. + * Returns 0 on success or an -error on failure. 
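/*
 * Editor's note: a short usage sketch for amdgpu_doorbell_get_kfd_info()
 * above. The real consumer is the amdgpu/amdkfd interface code, which is
 * not part of this patch; the function below is illustrative only.
 */
static void example_report_kfd_doorbells(struct amdgpu_device *adev)
{
	phys_addr_t base;
	size_t size, off;

	amdgpu_doorbell_get_kfd_info(adev, &base, &size, &off);
	/* amdgpu owns [base, base + off), amdkfd gets [base + off, base + size) */
	if (size)
		DRM_INFO("kfd doorbells at 0x%llx + 0x%zx, 0x%zx bytes\n",
			 (unsigned long long)base, off, size - off);
}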
+ */ +static int amdgpu_wb_init(struct amdgpu_device *adev) +{ + int r; + + if (adev->wb.wb_obj == NULL) { + r = amdgpu_bo_create(adev, AMDGPU_MAX_WB * 4, PAGE_SIZE, true, + AMDGPU_GEM_DOMAIN_GTT, 0, NULL, &adev->wb.wb_obj); + if (r) { + dev_warn(adev->dev, "(%d) create WB bo failed\n", r); + return r; + } + r = amdgpu_bo_reserve(adev->wb.wb_obj, false); + if (unlikely(r != 0)) { + amdgpu_wb_fini(adev); + return r; + } + r = amdgpu_bo_pin(adev->wb.wb_obj, AMDGPU_GEM_DOMAIN_GTT, + &adev->wb.gpu_addr); + if (r) { + amdgpu_bo_unreserve(adev->wb.wb_obj); + dev_warn(adev->dev, "(%d) pin WB bo failed\n", r); + amdgpu_wb_fini(adev); + return r; + } + r = amdgpu_bo_kmap(adev->wb.wb_obj, (void **)&adev->wb.wb); + amdgpu_bo_unreserve(adev->wb.wb_obj); + if (r) { + dev_warn(adev->dev, "(%d) map WB bo failed\n", r); + amdgpu_wb_fini(adev); + return r; + } + + adev->wb.num_wb = AMDGPU_MAX_WB; + memset(&adev->wb.used, 0, sizeof(adev->wb.used)); + + /* clear wb memory */ + memset((char *)adev->wb.wb, 0, AMDGPU_GPU_PAGE_SIZE); + } + + return 0; +} + +/** + * amdgpu_wb_get - Allocate a wb entry + * + * @adev: amdgpu_device pointer + * @wb: wb index + * + * Allocate a wb slot for use by the driver (all asics). + * Returns 0 on success or -EINVAL on failure. + */ +int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb) +{ + unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb); + if (offset < adev->wb.num_wb) { + __set_bit(offset, adev->wb.used); + *wb = offset; + return 0; + } else { + return -EINVAL; + } +} + +/** + * amdgpu_wb_free - Free a wb entry + * + * @adev: amdgpu_device pointer + * @wb: wb index + * + * Free a wb slot allocated for use by the driver (all asics) + */ +void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb) +{ + if (wb < adev->wb.num_wb) + __clear_bit(wb, adev->wb.used); +} + +/** + * amdgpu_vram_location - try to find VRAM location + * @adev: amdgpu device structure holding all necessary informations + * @mc: memory controller structure holding memory informations + * @base: base address at which to put VRAM + * + * Function will place try to place VRAM at base address provided + * as parameter (which is so far either PCI aperture address or + * for IGP TOM base address). + * + * If there is not enough space to fit the unvisible VRAM in the 32bits + * address space then we limit the VRAM size to the aperture. + * + * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size, + * this shouldn't be a problem as we are using the PCI aperture as a reference. + * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but + * not IGP. + * + * Note: we use mc_vram_size as on some board we need to program the mc to + * cover the whole aperture even if VRAM size is inferior to aperture size + * Novell bug 204882 + along with lots of ubuntu ones + * + * Note: when limiting vram it's safe to overwritte real_vram_size because + * we are not in case where real_vram_size is inferior to mc_vram_size (ie + * note afected by bogus hw of Novell bug 204882 + along with lots of ubuntu + * ones) + * + * Note: IGP TOM addr should be the same as the aperture addr, we don't + * explicitly check for that thought. + * + * FIXME: when reducing VRAM size align new size on power of 2. 
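/*
 * Editor's note: sketch of how a ring or fence user consumes a writeback
 * slot from the helpers above. The slot index addresses 32-bit words, so
 * the GPU address of a slot is adev->wb.gpu_addr + index * 4; this function
 * is illustrative only and not part of the patch.
 */
static int example_use_wb_slot(struct amdgpu_device *adev)
{
	u32 index;
	int r;

	r = amdgpu_wb_get(adev, &index);
	if (r)
		return r;

	/* CPU view of the slot; the GPU writes it via gpu_addr + index * 4 */
	adev->wb.wb[index] = 0;

	amdgpu_wb_free(adev, index);
	return 0;
}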
+ */ +void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base) +{ + uint64_t limit = (uint64_t)amdgpu_vram_limit << 20; + + mc->vram_start = base; + if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) { + dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n"); + mc->real_vram_size = mc->aper_size; + mc->mc_vram_size = mc->aper_size; + } + mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; + if (limit && limit < mc->real_vram_size) + mc->real_vram_size = limit; + dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n", + mc->mc_vram_size >> 20, mc->vram_start, + mc->vram_end, mc->real_vram_size >> 20); +} + +/** + * amdgpu_gtt_location - try to find GTT location + * @adev: amdgpu device structure holding all necessary informations + * @mc: memory controller structure holding memory informations + * + * Function will place try to place GTT before or after VRAM. + * + * If GTT size is bigger than space left then we ajust GTT size. + * Thus function will never fails. + * + * FIXME: when reducing GTT size align new size on power of 2. + */ +void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc) +{ + u64 size_af, size_bf; + + size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align; + size_bf = mc->vram_start & ~mc->gtt_base_align; + if (size_bf > size_af) { + if (mc->gtt_size > size_bf) { + dev_warn(adev->dev, "limiting GTT\n"); + mc->gtt_size = size_bf; + } + mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size; + } else { + if (mc->gtt_size > size_af) { + dev_warn(adev->dev, "limiting GTT\n"); + mc->gtt_size = size_af; + } + mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align; + } + mc->gtt_end = mc->gtt_start + mc->gtt_size - 1; + dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n", + mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end); +} + +/* + * GPU helpers function. + */ +/** + * amdgpu_card_posted - check if the hw has already been initialized + * + * @adev: amdgpu_device pointer + * + * Check if the asic has been initialized (all asics). + * Used at driver startup. + * Returns true if initialized or false if not. + */ +bool amdgpu_card_posted(struct amdgpu_device *adev) +{ + uint32_t reg; + + /* then check MEM_SIZE, in case the crtcs are off */ + reg = RREG32(mmCONFIG_MEMSIZE); + + if (reg) + return true; + + return false; + +} + +/** + * amdgpu_boot_test_post_card - check and possibly initialize the hw + * + * @adev: amdgpu_device pointer + * + * Check if the asic is initialized and if not, attempt to initialize + * it (all asics). + * Returns true if initialized or false if not. + */ +bool amdgpu_boot_test_post_card(struct amdgpu_device *adev) +{ + if (amdgpu_card_posted(adev)) + return true; + + if (adev->bios) { + DRM_INFO("GPU not posted. posting now...\n"); + if (adev->is_atom_bios) + amdgpu_atom_asic_init(adev->mode_info.atom_context); + return true; + } else { + dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n"); + return false; + } +} + +/** + * amdgpu_dummy_page_init - init dummy page used by the driver + * + * @adev: amdgpu_device pointer + * + * Allocate the dummy page used by the driver (all asics). + * This dummy page is used by the driver as a filler for gart entries + * when pages are taken out of the GART + * Returns 0 on sucess, -ENOMEM on failure. 
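/*
 * Editor's note: a worked example of the placement math in
 * amdgpu_vram_location()/amdgpu_gtt_location() above, with made-up numbers.
 * With vram_start = 0, mc_vram_size = 4 GB and a 40-bit mc_mask there is no
 * room before VRAM (size_bf = 0) but roughly 1 TB after it (size_af), so the
 * GTT lands at the first gtt_base_align-aligned address following vram_end:
 *
 *	gtt_start = (vram_end + 1 + gtt_base_align) & ~gtt_base_align;
 *
 * If the requested gtt_size does not fit in the chosen gap it is simply
 * clamped, which is why amdgpu_gtt_location() cannot fail.
 */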
+ */ +int amdgpu_dummy_page_init(struct amdgpu_device *adev) +{ + if (adev->dummy_page.page) + return 0; + adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO); + if (adev->dummy_page.page == NULL) + return -ENOMEM; + adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page, + 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) { + dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n"); + __free_page(adev->dummy_page.page); + adev->dummy_page.page = NULL; + return -ENOMEM; + } + return 0; +} + +/** + * amdgpu_dummy_page_fini - free dummy page used by the driver + * + * @adev: amdgpu_device pointer + * + * Frees the dummy page used by the driver (all asics). + */ +void amdgpu_dummy_page_fini(struct amdgpu_device *adev) +{ + if (adev->dummy_page.page == NULL) + return; + pci_unmap_page(adev->pdev, adev->dummy_page.addr, + PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + __free_page(adev->dummy_page.page); + adev->dummy_page.page = NULL; +} + + +/* ATOM accessor methods */ +/* + * ATOM is an interpreted byte code stored in tables in the vbios. The + * driver registers callbacks to access registers and the interpreter + * in the driver parses the tables and executes then to program specific + * actions (set display modes, asic init, etc.). See amdgpu_atombios.c, + * atombios.h, and atom.c + */ + +/** + * cail_pll_read - read PLL register + * + * @info: atom card_info pointer + * @reg: PLL register offset + * + * Provides a PLL register accessor for the atom interpreter (r4xx+). + * Returns the value of the PLL register. + */ +static uint32_t cail_pll_read(struct card_info *info, uint32_t reg) +{ + return 0; +} + +/** + * cail_pll_write - write PLL register + * + * @info: atom card_info pointer + * @reg: PLL register offset + * @val: value to write to the pll register + * + * Provides a PLL register accessor for the atom interpreter (r4xx+). + */ +static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val) +{ + +} + +/** + * cail_mc_read - read MC (Memory Controller) register + * + * @info: atom card_info pointer + * @reg: MC register offset + * + * Provides an MC register accessor for the atom interpreter (r4xx+). + * Returns the value of the MC register. + */ +static uint32_t cail_mc_read(struct card_info *info, uint32_t reg) +{ + return 0; +} + +/** + * cail_mc_write - write MC (Memory Controller) register + * + * @info: atom card_info pointer + * @reg: MC register offset + * @val: value to write to the pll register + * + * Provides a MC register accessor for the atom interpreter (r4xx+). + */ +static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val) +{ + +} + +/** + * cail_reg_write - write MMIO register + * + * @info: atom card_info pointer + * @reg: MMIO register offset + * @val: value to write to the pll register + * + * Provides a MMIO register accessor for the atom interpreter (r4xx+). + */ +static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val) +{ + struct amdgpu_device *adev = info->dev->dev_private; + + WREG32(reg, val); +} + +/** + * cail_reg_read - read MMIO register + * + * @info: atom card_info pointer + * @reg: MMIO register offset + * + * Provides an MMIO register accessor for the atom interpreter (r4xx+). + * Returns the value of the MMIO register. 
+ */ +static uint32_t cail_reg_read(struct card_info *info, uint32_t reg) +{ + struct amdgpu_device *adev = info->dev->dev_private; + uint32_t r; + + r = RREG32(reg); + return r; +} + +/** + * cail_ioreg_write - write IO register + * + * @info: atom card_info pointer + * @reg: IO register offset + * @val: value to write to the pll register + * + * Provides a IO register accessor for the atom interpreter (r4xx+). + */ +static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val) +{ + struct amdgpu_device *adev = info->dev->dev_private; + + WREG32_IO(reg, val); +} + +/** + * cail_ioreg_read - read IO register + * + * @info: atom card_info pointer + * @reg: IO register offset + * + * Provides an IO register accessor for the atom interpreter (r4xx+). + * Returns the value of the IO register. + */ +static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg) +{ + struct amdgpu_device *adev = info->dev->dev_private; + uint32_t r; + + r = RREG32_IO(reg); + return r; +} + +/** + * amdgpu_atombios_fini - free the driver info and callbacks for atombios + * + * @adev: amdgpu_device pointer + * + * Frees the driver info and register access callbacks for the ATOM + * interpreter (r4xx+). + * Called at driver shutdown. + */ +static void amdgpu_atombios_fini(struct amdgpu_device *adev) +{ + if (adev->mode_info.atom_context) + kfree(adev->mode_info.atom_context->scratch); + kfree(adev->mode_info.atom_context); + adev->mode_info.atom_context = NULL; + kfree(adev->mode_info.atom_card_info); + adev->mode_info.atom_card_info = NULL; +} + +/** + * amdgpu_atombios_init - init the driver info and callbacks for atombios + * + * @adev: amdgpu_device pointer + * + * Initializes the driver info and register access callbacks for the + * ATOM interpreter (r4xx+). + * Returns 0 on sucess, -ENOMEM on failure. + * Called at driver startup. + */ +static int amdgpu_atombios_init(struct amdgpu_device *adev) +{ + struct card_info *atom_card_info = + kzalloc(sizeof(struct card_info), GFP_KERNEL); + + if (!atom_card_info) + return -ENOMEM; + + adev->mode_info.atom_card_info = atom_card_info; + atom_card_info->dev = adev->ddev; + atom_card_info->reg_read = cail_reg_read; + atom_card_info->reg_write = cail_reg_write; + /* needed for iio ops */ + if (adev->rio_mem) { + atom_card_info->ioreg_read = cail_ioreg_read; + atom_card_info->ioreg_write = cail_ioreg_write; + } else { + DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n"); + atom_card_info->ioreg_read = cail_reg_read; + atom_card_info->ioreg_write = cail_reg_write; + } + atom_card_info->mc_read = cail_mc_read; + atom_card_info->mc_write = cail_mc_write; + atom_card_info->pll_read = cail_pll_read; + atom_card_info->pll_write = cail_pll_write; + + adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios); + if (!adev->mode_info.atom_context) { + amdgpu_atombios_fini(adev); + return -ENOMEM; + } + + mutex_init(&adev->mode_info.atom_context->mutex); + amdgpu_atombios_scratch_regs_init(adev); + amdgpu_atom_allocate_fb_scratch(adev->mode_info.atom_context); + return 0; +} + +/* if we get transitioned to only one device, take VGA back */ +/** + * amdgpu_vga_set_decode - enable/disable vga decode + * + * @cookie: amdgpu_device pointer + * @state: enable/disable vga decode + * + * Enable/disable vga decode (all asics). + * Returns VGA resource flags. 
+ */ +static unsigned int amdgpu_vga_set_decode(void *cookie, bool state) +{ + struct amdgpu_device *adev = cookie; + amdgpu_asic_set_vga_state(adev, state); + if (state) + return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | + VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; + else + return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; +} + +/** + * amdgpu_check_pot_argument - check that argument is a power of two + * + * @arg: value to check + * + * Validates that a certain argument is a power of two (all asics). + * Returns true if argument is valid. + */ +static bool amdgpu_check_pot_argument(int arg) +{ + return (arg & (arg - 1)) == 0; +} + +/** + * amdgpu_check_arguments - validate module params + * + * @adev: amdgpu_device pointer + * + * Validates certain module parameters and updates + * the associated values used by the driver (all asics). + */ +static void amdgpu_check_arguments(struct amdgpu_device *adev) +{ + /* vramlimit must be a power of two */ + if (!amdgpu_check_pot_argument(amdgpu_vram_limit)) { + dev_warn(adev->dev, "vram limit (%d) must be a power of 2\n", + amdgpu_vram_limit); + amdgpu_vram_limit = 0; + } + + if (amdgpu_gart_size != -1) { + /* gtt size must be power of two and greater or equal to 32M */ + if (amdgpu_gart_size < 32) { + dev_warn(adev->dev, "gart size (%d) too small\n", + amdgpu_gart_size); + amdgpu_gart_size = -1; + } else if (!amdgpu_check_pot_argument(amdgpu_gart_size)) { + dev_warn(adev->dev, "gart size (%d) must be a power of 2\n", + amdgpu_gart_size); + amdgpu_gart_size = -1; + } + } + + if (!amdgpu_check_pot_argument(amdgpu_vm_size)) { + dev_warn(adev->dev, "VM size (%d) must be a power of 2\n", + amdgpu_vm_size); + amdgpu_vm_size = 4; + } + + if (amdgpu_vm_size < 1) { + dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n", + amdgpu_vm_size); + amdgpu_vm_size = 4; + } + + /* + * Max GPUVM size for Cayman, SI and CI are 40 bits. + */ + if (amdgpu_vm_size > 1024) { + dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n", + amdgpu_vm_size); + amdgpu_vm_size = 4; + } + + /* defines number of bits in page table versus page directory, + * a page is 4KB so we have 12 bits offset, minimum 9 bits in the + * page table and the remaining bits are in the page directory */ + if (amdgpu_vm_block_size == -1) { + + /* Total bits covered by PD + PTs */ + unsigned bits = ilog2(amdgpu_vm_size) + 18; + + /* Make sure the PD is 4K in size up to 8GB address space. + Above that split equal between PD and PTs */ + if (amdgpu_vm_size <= 8) + amdgpu_vm_block_size = bits - 9; + else + amdgpu_vm_block_size = (bits + 3) / 2; + + } else if (amdgpu_vm_block_size < 9) { + dev_warn(adev->dev, "VM page table size (%d) too small\n", + amdgpu_vm_block_size); + amdgpu_vm_block_size = 9; + } + + if (amdgpu_vm_block_size > 24 || + (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) { + dev_warn(adev->dev, "VM page table size (%d) too large\n", + amdgpu_vm_block_size); + amdgpu_vm_block_size = 9; + } +} + +/** + * amdgpu_switcheroo_set_state - set switcheroo state + * + * @pdev: pci dev pointer + * @state: vga switcheroo state + * + * Callback for the switcheroo driver. Suspends or resumes the + * the asics before or after it is powered up using ACPI methods. 
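/*
 * Editor's note: worked example of the default vm_block_size calculation in
 * amdgpu_check_arguments() above. For amdgpu_vm_size = 8 (GB) the address
 * space spans ilog2(8) + 18 = 21 bits; keeping the page directory at 4K
 * gives vm_block_size = 21 - 9 = 12 bits per page table. For
 * amdgpu_vm_size = 64, bits = 24 and the split becomes (24 + 3) / 2 = 13,
 * dividing the bits roughly evenly between page directory and page tables.
 */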
+ */ +static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + + if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF) + return; + + if (state == VGA_SWITCHEROO_ON) { + unsigned d3_delay = dev->pdev->d3_delay; + + printk(KERN_INFO "amdgpu: switched on\n"); + /* don't suspend or resume card normally */ + dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; + + amdgpu_resume_kms(dev, true, true); + + dev->pdev->d3_delay = d3_delay; + + dev->switch_power_state = DRM_SWITCH_POWER_ON; + drm_kms_helper_poll_enable(dev); + } else { + printk(KERN_INFO "amdgpu: switched off\n"); + drm_kms_helper_poll_disable(dev); + dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; + amdgpu_suspend_kms(dev, true, true); + dev->switch_power_state = DRM_SWITCH_POWER_OFF; + } +} + +/** + * amdgpu_switcheroo_can_switch - see if switcheroo state can change + * + * @pdev: pci dev pointer + * + * Callback for the switcheroo driver. Check of the switcheroo + * state can be changed. + * Returns true if the state can be changed, false if not. + */ +static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + + /* + * FIXME: open_count is protected by drm_global_mutex but that would lead to + * locking inversion with the driver load path. And the access here is + * completely racy anyway. So don't bother with locking for now. + */ + return dev->open_count == 0; +} + +static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = { + .set_gpu_state = amdgpu_switcheroo_set_state, + .reprobe = NULL, + .can_switch = amdgpu_switcheroo_can_switch, +}; + +int amdgpu_set_clockgating_state(struct amdgpu_device *adev, + enum amdgpu_ip_block_type block_type, + enum amdgpu_clockgating_state state) +{ + int i, r = 0; + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (adev->ip_blocks[i].type == block_type) { + r = adev->ip_blocks[i].funcs->set_clockgating_state(adev, + state); + if (r) + return r; + } + } + return r; +} + +int amdgpu_set_powergating_state(struct amdgpu_device *adev, + enum amdgpu_ip_block_type block_type, + enum amdgpu_powergating_state state) +{ + int i, r = 0; + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (adev->ip_blocks[i].type == block_type) { + r = adev->ip_blocks[i].funcs->set_powergating_state(adev, + state); + if (r) + return r; + } + } + return r; +} + +const struct amdgpu_ip_block_version * amdgpu_get_ip_block( + struct amdgpu_device *adev, + enum amdgpu_ip_block_type type) +{ + int i; + + for (i = 0; i < adev->num_ip_blocks; i++) + if (adev->ip_blocks[i].type == type) + return &adev->ip_blocks[i]; + + return NULL; +} + +/** + * amdgpu_ip_block_version_cmp + * + * @adev: amdgpu_device pointer + * @type: enum amdgpu_ip_block_type + * @major: major version + * @minor: minor version + * + * return 0 if equal or greater + * return 1 if smaller or the ip_block doesn't exist + */ +int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev, + enum amdgpu_ip_block_type type, + u32 major, u32 minor) +{ + const struct amdgpu_ip_block_version *ip_block; + ip_block = amdgpu_get_ip_block(adev, type); + + if (ip_block && ((ip_block->major > major) || + ((ip_block->major == major) && + (ip_block->minor >= minor)))) + return 0; + + return 1; +} + +static int amdgpu_early_init(struct amdgpu_device *adev) +{ + int i, r = -EINVAL; + + switch (adev->asic_type) { + default: + /* FIXME: not supported yet */ + return -EINVAL; + } + + + + if (adev->ip_blocks == 
NULL) { + DRM_ERROR("No IP blocks found!\n"); + return r; + } + + for (i = 0; i < adev->num_ip_blocks; i++) { + if ((amdgpu_ip_block_mask & (1 << i)) == 0) { + DRM_ERROR("disabled ip block: %d\n", i); + adev->ip_block_enabled[i] = false; + } else { + if (adev->ip_blocks[i].funcs->early_init) { + r = adev->ip_blocks[i].funcs->early_init(adev); + if (r) + return r; + } + adev->ip_block_enabled[i] = true; + } + } + + return 0; +} + +static int amdgpu_init(struct amdgpu_device *adev) +{ + int i, r; + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_block_enabled[i]) + continue; + r = adev->ip_blocks[i].funcs->sw_init(adev); + if (r) + return r; + /* need to do gmc hw init early so we can allocate gpu mem */ + if (adev->ip_blocks[i].type == AMDGPU_IP_BLOCK_TYPE_GMC) { + r = amdgpu_vram_scratch_init(adev); + if (r) + return r; + r = adev->ip_blocks[i].funcs->hw_init(adev); + if (r) + return r; + r = amdgpu_wb_init(adev); + if (r) + return r; + } + } + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_block_enabled[i]) + continue; + /* gmc hw init is done early */ + if (adev->ip_blocks[i].type == AMDGPU_IP_BLOCK_TYPE_GMC) + continue; + r = adev->ip_blocks[i].funcs->hw_init(adev); + if (r) + return r; + } + + return 0; +} + +static int amdgpu_late_init(struct amdgpu_device *adev) +{ + int i = 0, r; + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_block_enabled[i]) + continue; + /* enable clockgating to save power */ + r = adev->ip_blocks[i].funcs->set_clockgating_state(adev, + AMDGPU_CG_STATE_GATE); + if (r) + return r; + if (adev->ip_blocks[i].funcs->late_init) { + r = adev->ip_blocks[i].funcs->late_init(adev); + if (r) + return r; + } + } + + return 0; +} + +static int amdgpu_fini(struct amdgpu_device *adev) +{ + int i, r; + + for (i = adev->num_ip_blocks - 1; i >= 0; i--) { + if (!adev->ip_block_enabled[i]) + continue; + if (adev->ip_blocks[i].type == AMDGPU_IP_BLOCK_TYPE_GMC) { + amdgpu_wb_fini(adev); + amdgpu_vram_scratch_fini(adev); + } + /* ungate blocks before hw fini so that we can shutdown the blocks safely */ + r = adev->ip_blocks[i].funcs->set_clockgating_state(adev, + AMDGPU_CG_STATE_UNGATE); + if (r) + return r; + r = adev->ip_blocks[i].funcs->hw_fini(adev); + /* XXX handle errors */ + } + + for (i = adev->num_ip_blocks - 1; i >= 0; i--) { + if (!adev->ip_block_enabled[i]) + continue; + r = adev->ip_blocks[i].funcs->sw_fini(adev); + /* XXX handle errors */ + adev->ip_block_enabled[i] = false; + } + + return 0; +} + +static int amdgpu_suspend(struct amdgpu_device *adev) +{ + int i, r; + + for (i = adev->num_ip_blocks - 1; i >= 0; i--) { + if (!adev->ip_block_enabled[i]) + continue; + /* ungate blocks so that suspend can properly shut them down */ + r = adev->ip_blocks[i].funcs->set_clockgating_state(adev, + AMDGPU_CG_STATE_UNGATE); + /* XXX handle errors */ + r = adev->ip_blocks[i].funcs->suspend(adev); + /* XXX handle errors */ + } + + return 0; +} + +static int amdgpu_resume(struct amdgpu_device *adev) +{ + int i, r; + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_block_enabled[i]) + continue; + r = adev->ip_blocks[i].funcs->resume(adev); + if (r) + return r; + } + + return 0; +} + +/** + * amdgpu_device_init - initialize the driver + * + * @adev: amdgpu_device pointer + * @pdev: drm dev pointer + * @pdev: pci dev pointer + * @flags: driver flags + * + * Initializes the driver info and hw (all asics). + * Returns 0 for success or an error on failure. + * Called at driver startup. 
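/*
 * Editor's note: the walkers above (amdgpu_early_init() through
 * amdgpu_resume()) iterate adev->ip_blocks, which each ASIC file fills in
 * (e.g. cik_set_ip_blocks() in the follow-up patch). A minimal sketch of
 * what one entry looks like; the version numbers and the NULL funcs pointer
 * are illustrative assumptions, not a real table.
 */
static const struct amdgpu_ip_block_version example_ip_blocks[] =
{
	{
		.type = AMDGPU_IP_BLOCK_TYPE_GMC,
		.major = 7,
		.minor = 0,
		.funcs = NULL,	/* the real entry points at the block's ip funcs
				 * table, e.g. the gmc_v7_0 one added later */
	},
	/* ...common, IH, SMC, DCE, GFX, SDMA, UVD and VCE entries follow... */
};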
+ */ +int amdgpu_device_init(struct amdgpu_device *adev, + struct drm_device *ddev, + struct pci_dev *pdev, + uint32_t flags) +{ + int r, i; + bool runtime = false; + + adev->shutdown = false; + adev->dev = &pdev->dev; + adev->ddev = ddev; + adev->pdev = pdev; + adev->flags = flags; + adev->asic_type = flags & AMDGPU_ASIC_MASK; + adev->is_atom_bios = false; + adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT; + adev->mc.gtt_size = 512 * 1024 * 1024; + adev->accel_working = false; + adev->num_rings = 0; + adev->mman.buffer_funcs = NULL; + adev->mman.buffer_funcs_ring = NULL; + adev->vm_manager.vm_pte_funcs = NULL; + adev->vm_manager.vm_pte_funcs_ring = NULL; + adev->gart.gart_funcs = NULL; + adev->fence_context = fence_context_alloc(AMDGPU_MAX_RINGS); + + adev->smc_rreg = &amdgpu_invalid_rreg; + adev->smc_wreg = &amdgpu_invalid_wreg; + adev->pcie_rreg = &amdgpu_invalid_rreg; + adev->pcie_wreg = &amdgpu_invalid_wreg; + adev->uvd_ctx_rreg = &amdgpu_invalid_rreg; + adev->uvd_ctx_wreg = &amdgpu_invalid_wreg; + adev->didt_rreg = &amdgpu_invalid_rreg; + adev->didt_wreg = &amdgpu_invalid_wreg; + adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg; + adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg; + + DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n", + amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device, + pdev->subsystem_vendor, pdev->subsystem_device); + + /* mutex initialization are all done here so we + * can recall function without having locking issues */ + mutex_init(&adev->ring_lock); + atomic_set(&adev->irq.ih.lock, 0); + mutex_init(&adev->gem.mutex); + mutex_init(&adev->pm.mutex); + mutex_init(&adev->gfx.gpu_clock_mutex); + mutex_init(&adev->srbm_mutex); + mutex_init(&adev->grbm_idx_mutex); + init_rwsem(&adev->pm.mclk_lock); + init_rwsem(&adev->exclusive_lock); + mutex_init(&adev->mn_lock); + hash_init(adev->mn_hash); + + amdgpu_check_arguments(adev); + + /* Registers mapping */ + /* TODO: block userspace mapping of io register */ + spin_lock_init(&adev->mmio_idx_lock); + spin_lock_init(&adev->smc_idx_lock); + spin_lock_init(&adev->pcie_idx_lock); + spin_lock_init(&adev->uvd_ctx_idx_lock); + spin_lock_init(&adev->didt_idx_lock); + spin_lock_init(&adev->audio_endpt_idx_lock); + + adev->rmmio_base = pci_resource_start(adev->pdev, 5); + adev->rmmio_size = pci_resource_len(adev->pdev, 5); + adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size); + if (adev->rmmio == NULL) { + return -ENOMEM; + } + DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base); + DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size); + + /* doorbell bar mapping */ + amdgpu_doorbell_init(adev); + + /* io port mapping */ + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { + if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) { + adev->rio_mem_size = pci_resource_len(adev->pdev, i); + adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size); + break; + } + } + if (adev->rio_mem == NULL) + DRM_ERROR("Unable to find PCI I/O BAR\n"); + + /* early init functions */ + r = amdgpu_early_init(adev); + if (r) + return r; + + /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */ + /* this will fail for cards that aren't VGA class devices, just + * ignore it */ + vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode); + + if (amdgpu_runtime_pm == 1) + runtime = true; + if (amdgpu_device_is_px(ddev)) + runtime = true; + vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime); + if (runtime) + 
vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain); + + /* Read BIOS */ + if (!amdgpu_get_bios(adev)) + return -EINVAL; + /* Must be an ATOMBIOS */ + if (!adev->is_atom_bios) { + dev_err(adev->dev, "Expecting atombios for GPU\n"); + return -EINVAL; + } + r = amdgpu_atombios_init(adev); + if (r) + return r; + + /* Post card if necessary */ + if (!amdgpu_card_posted(adev)) { + if (!adev->bios) { + dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n"); + return -EINVAL; + } + DRM_INFO("GPU not posted. posting now...\n"); + amdgpu_atom_asic_init(adev->mode_info.atom_context); + } + + /* Initialize clocks */ + r = amdgpu_atombios_get_clock_info(adev); + if (r) + return r; + /* init i2c buses */ + amdgpu_atombios_i2c_init(adev); + + /* Fence driver */ + r = amdgpu_fence_driver_init(adev); + if (r) + return r; + + /* init the mode config */ + drm_mode_config_init(adev->ddev); + + r = amdgpu_init(adev); + if (r) { + amdgpu_fini(adev); + return r; + } + + adev->accel_working = true; + + amdgpu_fbdev_init(adev); + + r = amdgpu_ib_pool_init(adev); + if (r) { + dev_err(adev->dev, "IB initialization failed (%d).\n", r); + return r; + } + + r = amdgpu_ib_ring_tests(adev); + if (r) + DRM_ERROR("ib ring test failed (%d).\n", r); + + r = amdgpu_gem_debugfs_init(adev); + if (r) { + DRM_ERROR("registering gem debugfs failed (%d).\n", r); + } + + r = amdgpu_debugfs_regs_init(adev); + if (r) { + DRM_ERROR("registering register debugfs failed (%d).\n", r); + } + + if ((amdgpu_testing & 1)) { + if (adev->accel_working) + amdgpu_test_moves(adev); + else + DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n"); + } + if ((amdgpu_testing & 2)) { + if (adev->accel_working) + amdgpu_test_syncing(adev); + else + DRM_INFO("amdgpu: acceleration disabled, skipping sync tests\n"); + } + if (amdgpu_benchmarking) { + if (adev->accel_working) + amdgpu_benchmark(adev, amdgpu_benchmarking); + else + DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n"); + } + + /* enable clockgating, etc. after ib tests, etc. since some blocks require + * explicit gating rather than handling it automatically. + */ + r = amdgpu_late_init(adev); + if (r) + return r; + + return 0; +} + +static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev); + +/** + * amdgpu_device_fini - tear down the driver + * + * @adev: amdgpu_device pointer + * + * Tear down the driver info (all asics). + * Called at driver shutdown. + */ +void amdgpu_device_fini(struct amdgpu_device *adev) +{ + int r; + + DRM_INFO("amdgpu: finishing device.\n"); + adev->shutdown = true; + /* evict vram memory */ + amdgpu_bo_evict_vram(adev); + amdgpu_ib_pool_fini(adev); + amdgpu_fence_driver_fini(adev); + amdgpu_fbdev_fini(adev); + r = amdgpu_fini(adev); + if (adev->ip_block_enabled) + kfree(adev->ip_block_enabled); + adev->ip_block_enabled = NULL; + adev->accel_working = false; + /* free i2c buses */ + amdgpu_i2c_fini(adev); + amdgpu_atombios_fini(adev); + kfree(adev->bios); + adev->bios = NULL; + vga_switcheroo_unregister_client(adev->pdev); + vga_client_register(adev->pdev, NULL, NULL, NULL); + if (adev->rio_mem) + pci_iounmap(adev->pdev, adev->rio_mem); + adev->rio_mem = NULL; + iounmap(adev->rmmio); + adev->rmmio = NULL; + amdgpu_doorbell_fini(adev); + amdgpu_debugfs_regs_cleanup(adev); + amdgpu_debugfs_remove_files(adev); +} + + +/* + * Suspend & resume. + */ +/** + * amdgpu_suspend_kms - initiate device suspend + * + * @pdev: drm dev pointer + * @state: suspend state + * + * Puts the hw in the suspend state (all asics). 
+ * Returns 0 for success or an error on failure. + * Called at driver suspend. + */ +int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon) +{ + struct amdgpu_device *adev; + struct drm_crtc *crtc; + struct drm_connector *connector; + int i, r; + bool force_completion = false; + + if (dev == NULL || dev->dev_private == NULL) { + return -ENODEV; + } + + adev = dev->dev_private; + + if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) + return 0; + + drm_kms_helper_poll_disable(dev); + + /* turn off display hw */ + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); + } + + /* unpin the front buffers */ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb); + struct amdgpu_bo *robj; + + if (rfb == NULL || rfb->obj == NULL) { + continue; + } + robj = gem_to_amdgpu_bo(rfb->obj); + /* don't unpin kernel fb objects */ + if (!amdgpu_fbdev_robj_is_fb(adev, robj)) { + r = amdgpu_bo_reserve(robj, false); + if (r == 0) { + amdgpu_bo_unpin(robj); + amdgpu_bo_unreserve(robj); + } + } + } + /* evict vram memory */ + amdgpu_bo_evict_vram(adev); + + /* wait for gpu to finish processing current batch */ + for (i = 0; i < AMDGPU_MAX_RINGS; i++) { + struct amdgpu_ring *ring = adev->rings[i]; + if (!ring) + continue; + + r = amdgpu_fence_wait_empty(ring); + if (r) { + /* delay GPU reset to resume */ + force_completion = true; + } + } + if (force_completion) { + amdgpu_fence_driver_force_completion(adev); + } + + r = amdgpu_suspend(adev); + + /* evict remaining vram memory */ + amdgpu_bo_evict_vram(adev); + + pci_save_state(dev->pdev); + if (suspend) { + /* Shut down the device */ + pci_disable_device(dev->pdev); + pci_set_power_state(dev->pdev, PCI_D3hot); + } + + if (fbcon) { + console_lock(); + amdgpu_fbdev_set_suspend(adev, 1); + console_unlock(); + } + return 0; +} + +/** + * amdgpu_resume_kms - initiate device resume + * + * @pdev: drm dev pointer + * + * Bring the hw back to operating state (all asics). + * Returns 0 for success or an error on failure. + * Called at driver resume. + */ +int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon) +{ + struct drm_connector *connector; + struct amdgpu_device *adev = dev->dev_private; + int r; + + if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) + return 0; + + if (fbcon) { + console_lock(); + } + if (resume) { + pci_set_power_state(dev->pdev, PCI_D0); + pci_restore_state(dev->pdev); + if (pci_enable_device(dev->pdev)) { + if (fbcon) + console_unlock(); + return -1; + } + } + + /* post card */ + amdgpu_atom_asic_init(adev->mode_info.atom_context); + + r = amdgpu_resume(adev); + + r = amdgpu_ib_ring_tests(adev); + if (r) + DRM_ERROR("ib ring test failed (%d).\n", r); + + r = amdgpu_late_init(adev); + if (r) + return r; + + /* blat the mode back in */ + if (fbcon) { + drm_helper_resume_force_mode(dev); + /* turn on display hw */ + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); + } + } + + drm_kms_helper_poll_enable(dev); + + if (fbcon) { + amdgpu_fbdev_set_suspend(adev, 0); + console_unlock(); + } + + return 0; +} + +/** + * amdgpu_gpu_reset - reset the asic + * + * @adev: amdgpu device pointer + * + * Attempt the reset the GPU if it has hung (all asics). + * Returns 0 for success or an error on failure. 
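/*
 * Editor's note: amdgpu_suspend_kms()/amdgpu_resume_kms() above are the
 * entry points used by the PCI power-management callbacks; the actual
 * wiring lives in amdgpu_drv.c, which is not part of this patch. A rough
 * sketch of that shape, for orientation only:
 */
static int example_pmops_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	/* suspend = true: also put the PCI device into D3hot,
	 * fbcon = true: suspend the fbdev console as well */
	return amdgpu_suspend_kms(drm_dev, true, true);
}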
+ */ +int amdgpu_gpu_reset(struct amdgpu_device *adev) +{ + unsigned ring_sizes[AMDGPU_MAX_RINGS]; + uint32_t *ring_data[AMDGPU_MAX_RINGS]; + + bool saved = false; + + int i, r; + int resched; + + down_write(&adev->exclusive_lock); + + if (!adev->needs_reset) { + up_write(&adev->exclusive_lock); + return 0; + } + + adev->needs_reset = false; + + /* block TTM */ + resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev); + + r = amdgpu_suspend(adev); + + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { + struct amdgpu_ring *ring = adev->rings[i]; + if (!ring) + continue; + + ring_sizes[i] = amdgpu_ring_backup(ring, &ring_data[i]); + if (ring_sizes[i]) { + saved = true; + dev_info(adev->dev, "Saved %d dwords of commands " + "on ring %d.\n", ring_sizes[i], i); + } + } + +retry: + r = amdgpu_asic_reset(adev); + if (!r) { + dev_info(adev->dev, "GPU reset succeeded, trying to resume\n"); + r = amdgpu_resume(adev); + } + + if (!r) { + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { + struct amdgpu_ring *ring = adev->rings[i]; + if (!ring) + continue; + + amdgpu_ring_restore(ring, ring_sizes[i], ring_data[i]); + ring_sizes[i] = 0; + ring_data[i] = NULL; + } + + r = amdgpu_ib_ring_tests(adev); + if (r) { + dev_err(adev->dev, "ib ring test failed (%d).\n", r); + if (saved) { + saved = false; + r = amdgpu_suspend(adev); + goto retry; + } + } + } else { + amdgpu_fence_driver_force_completion(adev); + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { + if (adev->rings[i]) + kfree(ring_data[i]); + } + } + + drm_helper_resume_force_mode(adev->ddev); + + ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched); + if (r) { + /* bad news, how to tell it to userspace ? */ + dev_info(adev->dev, "GPU reset failed\n"); + } + + up_write(&adev->exclusive_lock); + return r; +} + + +/* + * Debugfs + */ +int amdgpu_debugfs_add_files(struct amdgpu_device *adev, + struct drm_info_list *files, + unsigned nfiles) +{ + unsigned i; + + for (i = 0; i < adev->debugfs_count; i++) { + if (adev->debugfs[i].files == files) { + /* Already registered */ + return 0; + } + } + + i = adev->debugfs_count + 1; + if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) { + DRM_ERROR("Reached maximum number of debugfs components.\n"); + DRM_ERROR("Report so we increase " + "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n"); + return -EINVAL; + } + adev->debugfs[adev->debugfs_count].files = files; + adev->debugfs[adev->debugfs_count].num_files = nfiles; + adev->debugfs_count = i; +#if defined(CONFIG_DEBUG_FS) + drm_debugfs_create_files(files, nfiles, + adev->ddev->control->debugfs_root, + adev->ddev->control); + drm_debugfs_create_files(files, nfiles, + adev->ddev->primary->debugfs_root, + adev->ddev->primary); +#endif + return 0; +} + +static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev) +{ +#if defined(CONFIG_DEBUG_FS) + unsigned i; + + for (i = 0; i < adev->debugfs_count; i++) { + drm_debugfs_remove_files(adev->debugfs[i].files, + adev->debugfs[i].num_files, + adev->ddev->control); + drm_debugfs_remove_files(adev->debugfs[i].files, + adev->debugfs[i].num_files, + adev->ddev->primary); + } +#endif +} + +#if defined(CONFIG_DEBUG_FS) + +static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_device *adev = f->f_inode->i_private; + ssize_t result = 0; + int r; + + if (size & 0x3 || *pos & 0x3) + return -EINVAL; + + while (size) { + uint32_t value; + + if (*pos > adev->rmmio_size) + return result; + + value = RREG32(*pos >> 2); + r = put_user(value, (uint32_t *)buf); + if (r) + return r; + + result += 4; + buf += 
4; + *pos += 4; + size -= 4; + } + + return result; +} + +static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_device *adev = f->f_inode->i_private; + ssize_t result = 0; + int r; + + if (size & 0x3 || *pos & 0x3) + return -EINVAL; + + while (size) { + uint32_t value; + + if (*pos > adev->rmmio_size) + return result; + + r = get_user(value, (uint32_t *)buf); + if (r) + return r; + + WREG32(*pos >> 2, value); + + result += 4; + buf += 4; + *pos += 4; + size -= 4; + } + + return result; +} + +static const struct file_operations amdgpu_debugfs_regs_fops = { + .owner = THIS_MODULE, + .read = amdgpu_debugfs_regs_read, + .write = amdgpu_debugfs_regs_write, + .llseek = default_llseek +}; + +static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) +{ + struct drm_minor *minor = adev->ddev->primary; + struct dentry *ent, *root = minor->debugfs_root; + + ent = debugfs_create_file("amdgpu_regs", S_IFREG | S_IRUGO, root, + adev, &amdgpu_debugfs_regs_fops); + if (IS_ERR(ent)) + return PTR_ERR(ent); + i_size_write(ent->d_inode, adev->rmmio_size); + adev->debugfs_regs = ent; + + return 0; +} + +static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) +{ + debugfs_remove(adev->debugfs_regs); + adev->debugfs_regs = NULL; +} + +int amdgpu_debugfs_init(struct drm_minor *minor) +{ + return 0; +} + +void amdgpu_debugfs_cleanup(struct drm_minor *minor) +{ +} +#endif -- cgit v1.2.3 From a2e73f56fa6282481927ec43aa9362c03c2e2104 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 20 Apr 2015 17:09:27 -0400 Subject: drm/amdgpu: Add support for CIK parts MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch adds support for CIK parts. These parts are also supported by radeon which is the preferred option, so there is a config option to enable support for CIK parts in amdgpu for testing. 
Acked-by: Christian König Acked-by: Jammy Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 3 + drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 19 + drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 6684 ++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/ci_dpm.h | 348 ++ drivers/gpu/drm/amd/amdgpu/ci_smc.c | 279 ++ drivers/gpu/drm/amd/amdgpu/cik.c | 2505 +++++++++++ drivers/gpu/drm/amd/amdgpu/cik.h | 33 + drivers/gpu/drm/amd/amdgpu/cik_dpm.h | 30 + drivers/gpu/drm/amd/amdgpu/cik_ih.c | 453 ++ drivers/gpu/drm/amd/amdgpu/cik_ih.h | 29 + drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 1422 ++++++ drivers/gpu/drm/amd/amdgpu/cik_sdma.h | 29 + drivers/gpu/drm/amd/amdgpu/clearstate_ci.h | 944 ++++ drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 3830 ++++++++++++++++ drivers/gpu/drm/amd/amdgpu/dce_v8_0.h | 29 + drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 5635 +++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h | 37 + drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 1307 ++++++ drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h | 36 + drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 3336 ++++++++++++++ drivers/gpu/drm/amd/amdgpu/kv_dpm.h | 229 + drivers/gpu/drm/amd/amdgpu/kv_smc.c | 219 + drivers/gpu/drm/amd/amdgpu/smu7.h | 170 + drivers/gpu/drm/amd/amdgpu/smu7_discrete.h | 514 +++ drivers/gpu/drm/amd/amdgpu/smu7_fusion.h | 300 ++ drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 888 ++++ drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h | 29 + drivers/gpu/drm/amd/amdgpu/vce_v2_0.c | 642 +++ drivers/gpu/drm/amd/amdgpu/vce_v2_0.h | 29 + 29 files changed, 30008 insertions(+) create mode 100644 drivers/gpu/drm/amd/amdgpu/ci_dpm.c create mode 100644 drivers/gpu/drm/amd/amdgpu/ci_dpm.h create mode 100644 drivers/gpu/drm/amd/amdgpu/ci_smc.c create mode 100644 drivers/gpu/drm/amd/amdgpu/cik.c create mode 100644 drivers/gpu/drm/amd/amdgpu/cik.h create mode 100644 drivers/gpu/drm/amd/amdgpu/cik_dpm.h create mode 100644 drivers/gpu/drm/amd/amdgpu/cik_ih.c create mode 100644 drivers/gpu/drm/amd/amdgpu/cik_ih.h create mode 100644 drivers/gpu/drm/amd/amdgpu/cik_sdma.c create mode 100644 drivers/gpu/drm/amd/amdgpu/cik_sdma.h create mode 100644 drivers/gpu/drm/amd/amdgpu/clearstate_ci.h create mode 100644 drivers/gpu/drm/amd/amdgpu/dce_v8_0.c create mode 100644 drivers/gpu/drm/amd/amdgpu/dce_v8_0.h create mode 100644 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c create mode 100644 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h create mode 100644 drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c create mode 100644 drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h create mode 100644 drivers/gpu/drm/amd/amdgpu/kv_dpm.c create mode 100644 drivers/gpu/drm/amd/amdgpu/kv_dpm.h create mode 100644 drivers/gpu/drm/amd/amdgpu/kv_smc.c create mode 100644 drivers/gpu/drm/amd/amdgpu/smu7.h create mode 100644 drivers/gpu/drm/amd/amdgpu/smu7_discrete.h create mode 100644 drivers/gpu/drm/amd/amdgpu/smu7_fusion.h create mode 100644 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c create mode 100644 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h create mode 100644 drivers/gpu/drm/amd/amdgpu/vce_v2_0.c create mode 100644 drivers/gpu/drm/amd/amdgpu/vce_v2_0.h (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 01276a592bc5..aec28866945f 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -18,6 +18,9 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \ amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \ amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o +amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o 
gmc_v7_0.o cik_ih.o kv_smc.o kv_dpm.o \ + ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o + # add IH block amdgpu-y += \ amdgpu_irq.o \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index cd4bb90fa85c..548e0843d95a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -38,6 +38,9 @@ #include "amdgpu_i2c.h" #include "atom.h" #include "amdgpu_atombios.h" +#ifdef CONFIG_DRM_AMDGPU_CIK +#include "cik.h" +#endif #include "bif/bif_4_1_d.h" static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev); @@ -1154,6 +1157,22 @@ static int amdgpu_early_init(struct amdgpu_device *adev) int i, r = -EINVAL; switch (adev->asic_type) { +#ifdef CONFIG_DRM_AMDGPU_CIK + case CHIP_BONAIRE: + case CHIP_HAWAII: + case CHIP_KAVERI: + case CHIP_KABINI: + case CHIP_MULLINS: + if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII)) + adev->family = AMDGPU_FAMILY_CI; + else + adev->family = AMDGPU_FAMILY_KV; + + r = cik_set_ip_blocks(adev); + if (r) + return r; + break; +#endif default: /* FIXME: not supported yet */ return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c new file mode 100644 index 000000000000..5a9dad8e55c9 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c @@ -0,0 +1,6684 @@ +/* + * Copyright 2013 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include +#include "drmP.h" +#include "amdgpu.h" +#include "amdgpu_pm.h" +#include "amdgpu_ucode.h" +#include "cikd.h" +#include "amdgpu_dpm.h" +#include "ci_dpm.h" +#include "gfx_v7_0.h" +#include "atom.h" +#include + +#include "smu/smu_7_0_1_d.h" +#include "smu/smu_7_0_1_sh_mask.h" + +#include "dce/dce_8_0_d.h" +#include "dce/dce_8_0_sh_mask.h" + +#include "bif/bif_4_1_d.h" +#include "bif/bif_4_1_sh_mask.h" + +#include "gca/gfx_7_2_d.h" +#include "gca/gfx_7_2_sh_mask.h" + +#include "gmc/gmc_7_1_d.h" +#include "gmc/gmc_7_1_sh_mask.h" + +MODULE_FIRMWARE("radeon/bonaire_smc.bin"); +MODULE_FIRMWARE("radeon/hawaii_smc.bin"); + +#define MC_CG_ARB_FREQ_F0 0x0a +#define MC_CG_ARB_FREQ_F1 0x0b +#define MC_CG_ARB_FREQ_F2 0x0c +#define MC_CG_ARB_FREQ_F3 0x0d + +#define SMC_RAM_END 0x40000 + +#define VOLTAGE_SCALE 4 +#define VOLTAGE_VID_OFFSET_SCALE1 625 +#define VOLTAGE_VID_OFFSET_SCALE2 100 + +static const struct ci_pt_defaults defaults_hawaii_xt = +{ + 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000, + { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 }, + { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 } +}; + +static const struct ci_pt_defaults defaults_hawaii_pro = +{ + 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062, + { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 }, + { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 } +}; + +static const struct ci_pt_defaults defaults_bonaire_xt = +{ + 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000, + { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 }, + { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } +}; + +static const struct ci_pt_defaults defaults_bonaire_pro = +{ + 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062, + { 0x8C, 0x23F, 0x244, 0xA6, 0x83, 0x85, 0x86, 0x86, 0x83, 0xDB, 0xDB, 0xDA, 0x67, 0x60, 0x5F }, + { 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB } +}; + +static const struct ci_pt_defaults defaults_saturn_xt = +{ + 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000, + { 0x8C, 0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D }, + { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 } +}; + +static const struct ci_pt_defaults defaults_saturn_pro = +{ + 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000, + { 0x96, 0x21D, 0x23B, 0xA1, 0x85, 0x87, 0x83, 0x84, 0x81, 0xE6, 0xE6, 0xE6, 0x71, 0x6A, 0x6A }, + { 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 } +}; + +static const struct ci_pt_config_reg didt_config_ci[] = +{ + { 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x12, 
0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x62, 0x78000000, 27, 0x3, 
CISLANDS_CONFIGREG_DIDT_IND }, + { 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, + { 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, + { 0xFFFFFFFF } +}; + +static u8 ci_get_memory_module_index(struct amdgpu_device *adev) +{ + return (u8) ((RREG32(mmBIOS_SCRATCH_4) >> 16) & 0xff); +} + +#define MC_CG_ARB_FREQ_F0 0x0a +#define MC_CG_ARB_FREQ_F1 0x0b +#define MC_CG_ARB_FREQ_F2 0x0c +#define MC_CG_ARB_FREQ_F3 0x0d + +static int ci_copy_and_switch_arb_sets(struct amdgpu_device *adev, + u32 arb_freq_src, u32 arb_freq_dest) +{ + u32 mc_arb_dram_timing; + u32 mc_arb_dram_timing2; + u32 burst_time; + u32 mc_cg_config; + + switch (arb_freq_src) { + case MC_CG_ARB_FREQ_F0: + mc_arb_dram_timing = RREG32(mmMC_ARB_DRAM_TIMING); + mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2); + burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK) >> + MC_ARB_BURST_TIME__STATE0__SHIFT; + break; + case MC_CG_ARB_FREQ_F1: + mc_arb_dram_timing = RREG32(mmMC_ARB_DRAM_TIMING_1); + mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2_1); + burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE1_MASK) >> + MC_ARB_BURST_TIME__STATE1__SHIFT; + break; + default: + return -EINVAL; + } + + switch (arb_freq_dest) { + case MC_CG_ARB_FREQ_F0: + WREG32(mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing); + WREG32(mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2); + WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE0__SHIFT), + ~MC_ARB_BURST_TIME__STATE0_MASK); + break; + case MC_CG_ARB_FREQ_F1: + WREG32(mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing); + WREG32(mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2); + WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE1__SHIFT), + ~MC_ARB_BURST_TIME__STATE1_MASK); + break; + default: + return -EINVAL; + } + + mc_cg_config = RREG32(mmMC_CG_CONFIG) | 0x0000000F; + WREG32(mmMC_CG_CONFIG, mc_cg_config); + WREG32_P(mmMC_ARB_CG, (arb_freq_dest) << MC_ARB_CG__CG_ARB_REQ__SHIFT, + ~MC_ARB_CG__CG_ARB_REQ_MASK); + + return 0; +} + +static u8 ci_get_ddr3_mclk_frequency_ratio(u32 memory_clock) +{ + u8 mc_para_index; + + if (memory_clock < 10000) + mc_para_index = 0; + else if (memory_clock >= 80000) + mc_para_index = 0x0f; + else + mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1); + return mc_para_index; +} + +static u8 ci_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode) +{ + u8 mc_para_index; + + if (strobe_mode) { + if (memory_clock < 12500) + mc_para_index = 0x00; + else if (memory_clock > 47500) + mc_para_index = 0x0f; + else + mc_para_index = (u8)((memory_clock - 10000) / 2500); + } else { + if (memory_clock < 65000) + mc_para_index = 0x00; + else if (memory_clock > 135000) + mc_para_index = 0x0f; + else + mc_para_index = (u8)((memory_clock - 60000) / 5000); + } + return mc_para_index; +} + +static void ci_trim_voltage_table_to_fit_state_table(struct amdgpu_device *adev, + u32 max_voltage_steps, + struct atom_voltage_table *voltage_table) +{ + unsigned int i, diff; + + if (voltage_table->count <= max_voltage_steps) + return; + + diff = voltage_table->count - max_voltage_steps; + + for (i = 0; i < max_voltage_steps; i++) + voltage_table->entries[i] = voltage_table->entries[i + diff]; + + voltage_table->count = max_voltage_steps; +} + +static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev, + struct atom_voltage_table_entry *voltage_table, + u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd); +static int 
ci_set_power_limit(struct amdgpu_device *adev, u32 n); +static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev, + u32 target_tdp); +static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate); +static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev); +static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev); + +static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev, + PPSMC_Msg msg, u32 parameter); +static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev); +static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev); + +static struct ci_power_info *ci_get_pi(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = adev->pm.dpm.priv; + + return pi; +} + +static struct ci_ps *ci_get_ps(struct amdgpu_ps *rps) +{ + struct ci_ps *ps = rps->ps_priv; + + return ps; +} + +static void ci_initialize_powertune_defaults(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + + switch (adev->pdev->device) { + case 0x6649: + case 0x6650: + case 0x6651: + case 0x6658: + case 0x665C: + case 0x665D: + default: + pi->powertune_defaults = &defaults_bonaire_xt; + break; + case 0x6640: + case 0x6641: + case 0x6646: + case 0x6647: + pi->powertune_defaults = &defaults_saturn_xt; + break; + case 0x67B8: + case 0x67B0: + pi->powertune_defaults = &defaults_hawaii_xt; + break; + case 0x67BA: + case 0x67B1: + pi->powertune_defaults = &defaults_hawaii_pro; + break; + case 0x67A0: + case 0x67A1: + case 0x67A2: + case 0x67A8: + case 0x67A9: + case 0x67AA: + case 0x67B9: + case 0x67BE: + pi->powertune_defaults = &defaults_bonaire_xt; + break; + } + + pi->dte_tj_offset = 0; + + pi->caps_power_containment = true; + pi->caps_cac = false; + pi->caps_sq_ramping = false; + pi->caps_db_ramping = false; + pi->caps_td_ramping = false; + pi->caps_tcp_ramping = false; + + if (pi->caps_power_containment) { + pi->caps_cac = true; + if (adev->asic_type == CHIP_HAWAII) + pi->enable_bapm_feature = false; + else + pi->enable_bapm_feature = true; + pi->enable_tdc_limit_feature = true; + pi->enable_pkg_pwr_tracking_feature = true; + } +} + +static u8 ci_convert_to_vid(u16 vddc) +{ + return (6200 - (vddc * VOLTAGE_SCALE)) / 25; +} + +static int ci_populate_bapm_vddc_vid_sidd(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd; + u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd; + u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2; + u32 i; + + if (adev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL) + return -EINVAL; + if (adev->pm.dpm.dyn_state.cac_leakage_table.count > 8) + return -EINVAL; + if (adev->pm.dpm.dyn_state.cac_leakage_table.count != + adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count) + return -EINVAL; + + for (i = 0; i < adev->pm.dpm.dyn_state.cac_leakage_table.count; i++) { + if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) { + lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1); + hi_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2); + hi2_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3); + } else { + lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc); + hi_vid[i] = ci_convert_to_vid((u16)adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage); + } + } + return 0; +} + +static int ci_populate_vddc_vid(struct amdgpu_device *adev) +{ + struct ci_power_info 
*pi = ci_get_pi(adev); + u8 *vid = pi->smc_powertune_table.VddCVid; + u32 i; + + if (pi->vddc_voltage_table.count > 8) + return -EINVAL; + + for (i = 0; i < pi->vddc_voltage_table.count; i++) + vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value); + + return 0; +} + +static int ci_populate_svi_load_line(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults; + + pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en; + pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc; + pi->smc_powertune_table.SviLoadLineTrimVddC = 3; + pi->smc_powertune_table.SviLoadLineOffsetVddC = 0; + + return 0; +} + +static int ci_populate_tdc_limit(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults; + u16 tdc_limit; + + tdc_limit = adev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256; + pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit); + pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc = + pt_defaults->tdc_vddc_throttle_release_limit_perc; + pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt; + + return 0; +} + +static int ci_populate_dw8(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults; + int ret; + + ret = amdgpu_ci_read_smc_sram_dword(adev, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU7_Firmware_Header, PmFuseTable) + + offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl), + (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl, + pi->sram_end); + if (ret) + return -EINVAL; + else + pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl; + + return 0; +} + +static int ci_populate_fuzzy_fan(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + + if ((adev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) || + (adev->pm.dpm.fan.fan_output_sensitivity == 0)) + adev->pm.dpm.fan.fan_output_sensitivity = + adev->pm.dpm.fan.default_fan_output_sensitivity; + + pi->smc_powertune_table.FuzzyFan_PwmSetDelta = + cpu_to_be16(adev->pm.dpm.fan.fan_output_sensitivity); + + return 0; +} + +static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd; + u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd; + int i, min, max; + + min = max = hi_vid[0]; + for (i = 0; i < 8; i++) { + if (0 != hi_vid[i]) { + if (min > hi_vid[i]) + min = hi_vid[i]; + if (max < hi_vid[i]) + max = hi_vid[i]; + } + + if (0 != lo_vid[i]) { + if (min > lo_vid[i]) + min = lo_vid[i]; + if (max < lo_vid[i]) + max = lo_vid[i]; + } + } + + if ((min == 0) || (max == 0)) + return -EINVAL; + pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max; + pi->smc_powertune_table.GnbLPMLMinVid = (u8)min; + + return 0; +} + +static int ci_populate_bapm_vddc_base_leakage_sidd(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd; + u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd; + struct amdgpu_cac_tdp_table *cac_tdp_table = + adev->pm.dpm.dyn_state.cac_tdp_table; + + hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256; + lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256; + + pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd); + 
pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd); + + return 0; +} + +static int ci_populate_bapm_parameters_in_dpm_table(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults; + SMU7_Discrete_DpmTable *dpm_table = &pi->smc_state_table; + struct amdgpu_cac_tdp_table *cac_tdp_table = + adev->pm.dpm.dyn_state.cac_tdp_table; + struct amdgpu_ppm_table *ppm = adev->pm.dpm.dyn_state.ppm_table; + int i, j, k; + const u16 *def1; + const u16 *def2; + + dpm_table->DefaultTdp = cac_tdp_table->tdp * 256; + dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256; + + dpm_table->DTETjOffset = (u8)pi->dte_tj_offset; + dpm_table->GpuTjMax = + (u8)(pi->thermal_temp_setting.temperature_high / 1000); + dpm_table->GpuTjHyst = 8; + + dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base; + + if (ppm) { + dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000); + dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256); + } else { + dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0); + dpm_table->PPM_TemperatureLimit = cpu_to_be16(0); + } + + dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient); + def1 = pt_defaults->bapmti_r; + def2 = pt_defaults->bapmti_rc; + + for (i = 0; i < SMU7_DTE_ITERATIONS; i++) { + for (j = 0; j < SMU7_DTE_SOURCES; j++) { + for (k = 0; k < SMU7_DTE_SINKS; k++) { + dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1); + dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2); + def1++; + def2++; + } + } + } + + return 0; +} + +static int ci_populate_pm_base(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + u32 pm_fuse_table_offset; + int ret; + + if (pi->caps_power_containment) { + ret = amdgpu_ci_read_smc_sram_dword(adev, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU7_Firmware_Header, PmFuseTable), + &pm_fuse_table_offset, pi->sram_end); + if (ret) + return ret; + ret = ci_populate_bapm_vddc_vid_sidd(adev); + if (ret) + return ret; + ret = ci_populate_vddc_vid(adev); + if (ret) + return ret; + ret = ci_populate_svi_load_line(adev); + if (ret) + return ret; + ret = ci_populate_tdc_limit(adev); + if (ret) + return ret; + ret = ci_populate_dw8(adev); + if (ret) + return ret; + ret = ci_populate_fuzzy_fan(adev); + if (ret) + return ret; + ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(adev); + if (ret) + return ret; + ret = ci_populate_bapm_vddc_base_leakage_sidd(adev); + if (ret) + return ret; + ret = amdgpu_ci_copy_bytes_to_smc(adev, pm_fuse_table_offset, + (u8 *)&pi->smc_powertune_table, + sizeof(SMU7_Discrete_PmFuses), pi->sram_end); + if (ret) + return ret; + } + + return 0; +} + +static void ci_do_enable_didt(struct amdgpu_device *adev, const bool enable) +{ + struct ci_power_info *pi = ci_get_pi(adev); + u32 data; + + if (pi->caps_sq_ramping) { + data = RREG32_DIDT(ixDIDT_SQ_CTRL0); + if (enable) + data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK; + else + data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK; + WREG32_DIDT(ixDIDT_SQ_CTRL0, data); + } + + if (pi->caps_db_ramping) { + data = RREG32_DIDT(ixDIDT_DB_CTRL0); + if (enable) + data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK; + else + data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK; + WREG32_DIDT(ixDIDT_DB_CTRL0, data); + } + + if (pi->caps_td_ramping) { + data = RREG32_DIDT(ixDIDT_TD_CTRL0); + if (enable) + data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK; + else + data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK; + WREG32_DIDT(ixDIDT_TD_CTRL0, data); + } + + if 
(pi->caps_tcp_ramping) { + data = RREG32_DIDT(ixDIDT_TCP_CTRL0); + if (enable) + data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK; + else + data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK; + WREG32_DIDT(ixDIDT_TCP_CTRL0, data); + } +} + +static int ci_program_pt_config_registers(struct amdgpu_device *adev, + const struct ci_pt_config_reg *cac_config_regs) +{ + const struct ci_pt_config_reg *config_regs = cac_config_regs; + u32 data; + u32 cache = 0; + + if (config_regs == NULL) + return -EINVAL; + + while (config_regs->offset != 0xFFFFFFFF) { + if (config_regs->type == CISLANDS_CONFIGREG_CACHE) { + cache |= ((config_regs->value << config_regs->shift) & config_regs->mask); + } else { + switch (config_regs->type) { + case CISLANDS_CONFIGREG_SMC_IND: + data = RREG32_SMC(config_regs->offset); + break; + case CISLANDS_CONFIGREG_DIDT_IND: + data = RREG32_DIDT(config_regs->offset); + break; + default: + data = RREG32(config_regs->offset); + break; + } + + data &= ~config_regs->mask; + data |= ((config_regs->value << config_regs->shift) & config_regs->mask); + data |= cache; + + switch (config_regs->type) { + case CISLANDS_CONFIGREG_SMC_IND: + WREG32_SMC(config_regs->offset, data); + break; + case CISLANDS_CONFIGREG_DIDT_IND: + WREG32_DIDT(config_regs->offset, data); + break; + default: + WREG32(config_regs->offset, data); + break; + } + cache = 0; + } + config_regs++; + } + return 0; +} + +static int ci_enable_didt(struct amdgpu_device *adev, bool enable) +{ + struct ci_power_info *pi = ci_get_pi(adev); + int ret; + + if (pi->caps_sq_ramping || pi->caps_db_ramping || + pi->caps_td_ramping || pi->caps_tcp_ramping) { + gfx_v7_0_enter_rlc_safe_mode(adev); + + if (enable) { + ret = ci_program_pt_config_registers(adev, didt_config_ci); + if (ret) { + gfx_v7_0_exit_rlc_safe_mode(adev); + return ret; + } + } + + ci_do_enable_didt(adev, enable); + + gfx_v7_0_exit_rlc_safe_mode(adev); + } + + return 0; +} + +static int ci_enable_power_containment(struct amdgpu_device *adev, bool enable) +{ + struct ci_power_info *pi = ci_get_pi(adev); + PPSMC_Result smc_result; + int ret = 0; + + if (enable) { + pi->power_containment_features = 0; + if (pi->caps_power_containment) { + if (pi->enable_bapm_feature) { + smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableDTE); + if (smc_result != PPSMC_Result_OK) + ret = -EINVAL; + else + pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM; + } + + if (pi->enable_tdc_limit_feature) { + smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitEnable); + if (smc_result != PPSMC_Result_OK) + ret = -EINVAL; + else + pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit; + } + + if (pi->enable_pkg_pwr_tracking_feature) { + smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitEnable); + if (smc_result != PPSMC_Result_OK) { + ret = -EINVAL; + } else { + struct amdgpu_cac_tdp_table *cac_tdp_table = + adev->pm.dpm.dyn_state.cac_tdp_table; + u32 default_pwr_limit = + (u32)(cac_tdp_table->maximum_power_delivery_limit * 256); + + pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit; + + ci_set_power_limit(adev, default_pwr_limit); + } + } + } + } else { + if (pi->caps_power_containment && pi->power_containment_features) { + if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit) + amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitDisable); + + if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM) + amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableDTE); + + if (pi->power_containment_features & 
POWERCONTAINMENT_FEATURE_PkgPwrLimit) + amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitDisable); + pi->power_containment_features = 0; + } + } + + return ret; +} + +static int ci_enable_smc_cac(struct amdgpu_device *adev, bool enable) +{ + struct ci_power_info *pi = ci_get_pi(adev); + PPSMC_Result smc_result; + int ret = 0; + + if (pi->caps_cac) { + if (enable) { + smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableCac); + if (smc_result != PPSMC_Result_OK) { + ret = -EINVAL; + pi->cac_enabled = false; + } else { + pi->cac_enabled = true; + } + } else if (pi->cac_enabled) { + amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableCac); + pi->cac_enabled = false; + } + } + + return ret; +} + +static int ci_enable_thermal_based_sclk_dpm(struct amdgpu_device *adev, + bool enable) +{ + struct ci_power_info *pi = ci_get_pi(adev); + PPSMC_Result smc_result = PPSMC_Result_OK; + + if (pi->thermal_sclk_dpm_enabled) { + if (enable) + smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ENABLE_THERMAL_DPM); + else + smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DISABLE_THERMAL_DPM); + } + + if (smc_result == PPSMC_Result_OK) + return 0; + else + return -EINVAL; +} + +static int ci_power_control_set_level(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + struct amdgpu_cac_tdp_table *cac_tdp_table = + adev->pm.dpm.dyn_state.cac_tdp_table; + s32 adjust_percent; + s32 target_tdp; + int ret = 0; + bool adjust_polarity = false; /* ??? */ + + if (pi->caps_power_containment) { + adjust_percent = adjust_polarity ? + adev->pm.dpm.tdp_adjustment : (-1 * adev->pm.dpm.tdp_adjustment); + target_tdp = ((100 + adjust_percent) * + (s32)cac_tdp_table->configurable_tdp) / 100; + + ret = ci_set_overdrive_target_tdp(adev, (u32)target_tdp); + } + + return ret; +} + +static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate) +{ + struct ci_power_info *pi = ci_get_pi(adev); + + if (pi->uvd_power_gated == gate) + return; + + pi->uvd_power_gated = gate; + + ci_update_uvd_dpm(adev, gate); +} + +static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev) +{ + u32 vblank_time = amdgpu_dpm_get_vblank_time(adev); + u32 switch_limit = adev->mc.is_gddr5 ? 
450 : 300; + + if (vblank_time < switch_limit) + return true; + else + return false; + +} + +static void ci_apply_state_adjust_rules(struct amdgpu_device *adev, + struct amdgpu_ps *rps) +{ + struct ci_ps *ps = ci_get_ps(rps); + struct ci_power_info *pi = ci_get_pi(adev); + struct amdgpu_clock_and_voltage_limits *max_limits; + bool disable_mclk_switching; + u32 sclk, mclk; + int i; + + if (rps->vce_active) { + rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk; + rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk; + } else { + rps->evclk = 0; + rps->ecclk = 0; + } + + if ((adev->pm.dpm.new_active_crtc_count > 1) || + ci_dpm_vblank_too_short(adev)) + disable_mclk_switching = true; + else + disable_mclk_switching = false; + + if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) + pi->battery_state = true; + else + pi->battery_state = false; + + if (adev->pm.dpm.ac_power) + max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; + else + max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc; + + if (adev->pm.dpm.ac_power == false) { + for (i = 0; i < ps->performance_level_count; i++) { + if (ps->performance_levels[i].mclk > max_limits->mclk) + ps->performance_levels[i].mclk = max_limits->mclk; + if (ps->performance_levels[i].sclk > max_limits->sclk) + ps->performance_levels[i].sclk = max_limits->sclk; + } + } + + /* XXX validate the min clocks required for display */ + + if (disable_mclk_switching) { + mclk = ps->performance_levels[ps->performance_level_count - 1].mclk; + sclk = ps->performance_levels[0].sclk; + } else { + mclk = ps->performance_levels[0].mclk; + sclk = ps->performance_levels[0].sclk; + } + + if (rps->vce_active) { + if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk) + sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk; + if (mclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk) + mclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk; + } + + ps->performance_levels[0].sclk = sclk; + ps->performance_levels[0].mclk = mclk; + + if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk) + ps->performance_levels[1].sclk = ps->performance_levels[0].sclk; + + if (disable_mclk_switching) { + if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk) + ps->performance_levels[0].mclk = ps->performance_levels[1].mclk; + } else { + if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk) + ps->performance_levels[1].mclk = ps->performance_levels[0].mclk; + } +} + +static int ci_thermal_set_temperature_range(struct amdgpu_device *adev, + int min_temp, int max_temp) +{ + int low_temp = 0 * 1000; + int high_temp = 255 * 1000; + u32 tmp; + + if (low_temp < min_temp) + low_temp = min_temp; + if (high_temp > max_temp) + high_temp = max_temp; + if (high_temp < low_temp) { + DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp); + return -EINVAL; + } + + tmp = RREG32_SMC(ixCG_THERMAL_INT); + tmp &= ~(CG_THERMAL_INT__DIG_THERM_INTH_MASK | CG_THERMAL_INT__DIG_THERM_INTL_MASK); + tmp |= ((high_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTH__SHIFT) | + ((low_temp / 1000)) << CG_THERMAL_INT__DIG_THERM_INTL__SHIFT; + WREG32_SMC(ixCG_THERMAL_INT, tmp); + +#if 0 + /* XXX: need to figure out how to handle this properly */ + tmp = RREG32_SMC(ixCG_THERMAL_CTRL); + tmp &= DIG_THERM_DPM_MASK; + tmp |= DIG_THERM_DPM(high_temp / 1000); + WREG32_SMC(ixCG_THERMAL_CTRL, tmp); +#endif + + adev->pm.dpm.thermal.min_temp = low_temp; + 
adev->pm.dpm.thermal.max_temp = high_temp; + return 0; +} + +static int ci_thermal_enable_alert(struct amdgpu_device *adev, + bool enable) +{ + u32 thermal_int = RREG32_SMC(ixCG_THERMAL_INT); + PPSMC_Result result; + + if (enable) { + thermal_int &= ~(CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK | + CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK); + WREG32_SMC(ixCG_THERMAL_INT, thermal_int); + result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Enable); + if (result != PPSMC_Result_OK) { + DRM_DEBUG_KMS("Could not enable thermal interrupts.\n"); + return -EINVAL; + } + } else { + thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK | + CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; + WREG32_SMC(ixCG_THERMAL_INT, thermal_int); + result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Disable); + if (result != PPSMC_Result_OK) { + DRM_DEBUG_KMS("Could not disable thermal interrupts.\n"); + return -EINVAL; + } + } + + return 0; +} + +static void ci_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode) +{ + struct ci_power_info *pi = ci_get_pi(adev); + u32 tmp; + + if (pi->fan_ctrl_is_in_default_mode) { + tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK) + >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT; + pi->fan_ctrl_default_mode = tmp; + tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__TMIN_MASK) + >> CG_FDO_CTRL2__TMIN__SHIFT; + pi->t_min = tmp; + pi->fan_ctrl_is_in_default_mode = false; + } + + tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK; + tmp |= 0 << CG_FDO_CTRL2__TMIN__SHIFT; + WREG32_SMC(ixCG_FDO_CTRL2, tmp); + + tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK; + tmp |= mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT; + WREG32_SMC(ixCG_FDO_CTRL2, tmp); +} + +static int ci_thermal_setup_fan_table(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE }; + u32 duty100; + u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2; + u16 fdo_min, slope1, slope2; + u32 reference_clock, tmp; + int ret; + u64 tmp64; + + if (!pi->fan_table_start) { + adev->pm.dpm.fan.ucode_fan_control = false; + return 0; + } + + duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK) + >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT; + + if (duty100 == 0) { + adev->pm.dpm.fan.ucode_fan_control = false; + return 0; + } + + tmp64 = (u64)adev->pm.dpm.fan.pwm_min * duty100; + do_div(tmp64, 10000); + fdo_min = (u16)tmp64; + + t_diff1 = adev->pm.dpm.fan.t_med - adev->pm.dpm.fan.t_min; + t_diff2 = adev->pm.dpm.fan.t_high - adev->pm.dpm.fan.t_med; + + pwm_diff1 = adev->pm.dpm.fan.pwm_med - adev->pm.dpm.fan.pwm_min; + pwm_diff2 = adev->pm.dpm.fan.pwm_high - adev->pm.dpm.fan.pwm_med; + + slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100); + slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100); + + fan_table.TempMin = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100); + fan_table.TempMed = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100); + fan_table.TempMax = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100); + + fan_table.Slope1 = cpu_to_be16(slope1); + fan_table.Slope2 = cpu_to_be16(slope2); + + fan_table.FdoMin = cpu_to_be16(fdo_min); + + fan_table.HystDown = cpu_to_be16(adev->pm.dpm.fan.t_hyst); + + fan_table.HystUp = cpu_to_be16(1); + + fan_table.HystSlope = cpu_to_be16(1); + + fan_table.TempRespLim = cpu_to_be16(5); + + reference_clock = amdgpu_asic_get_xclk(adev); + + fan_table.RefreshPeriod = cpu_to_be32((adev->pm.dpm.fan.cycle_delay * + reference_clock) 
/ 1600); + + fan_table.FdoMax = cpu_to_be16((u16)duty100); + + tmp = (RREG32_SMC(ixCG_MULT_THERMAL_CTRL) & CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK) + >> CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT; + fan_table.TempSrc = (uint8_t)tmp; + + ret = amdgpu_ci_copy_bytes_to_smc(adev, + pi->fan_table_start, + (u8 *)(&fan_table), + sizeof(fan_table), + pi->sram_end); + + if (ret) { + DRM_ERROR("Failed to load fan table to the SMC."); + adev->pm.dpm.fan.ucode_fan_control = false; + } + + return 0; +} + +static int ci_fan_ctrl_start_smc_fan_control(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + PPSMC_Result ret; + + if (pi->caps_od_fuzzy_fan_control_support) { + ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev, + PPSMC_StartFanControl, + FAN_CONTROL_FUZZY); + if (ret != PPSMC_Result_OK) + return -EINVAL; + ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_SetFanPwmMax, + adev->pm.dpm.fan.default_max_fan_pwm); + if (ret != PPSMC_Result_OK) + return -EINVAL; + } else { + ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev, + PPSMC_StartFanControl, + FAN_CONTROL_TABLE); + if (ret != PPSMC_Result_OK) + return -EINVAL; + } + + pi->fan_is_controlled_by_smc = true; + return 0; +} + + +static int ci_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev) +{ + PPSMC_Result ret; + struct ci_power_info *pi = ci_get_pi(adev); + + ret = amdgpu_ci_send_msg_to_smc(adev, PPSMC_StopFanControl); + if (ret == PPSMC_Result_OK) { + pi->fan_is_controlled_by_smc = false; + return 0; + } else { + return -EINVAL; + } +} + +static int ci_dpm_get_fan_speed_percent(struct amdgpu_device *adev, + u32 *speed) +{ + u32 duty, duty100; + u64 tmp64; + + if (adev->pm.no_fan) + return -ENOENT; + + duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK) + >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT; + duty = (RREG32_SMC(ixCG_THERMAL_STATUS) & CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK) + >> CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT; + + if (duty100 == 0) + return -EINVAL; + + tmp64 = (u64)duty * 100; + do_div(tmp64, duty100); + *speed = (u32)tmp64; + + if (*speed > 100) + *speed = 100; + + return 0; +} + +static int ci_dpm_set_fan_speed_percent(struct amdgpu_device *adev, + u32 speed) +{ + u32 tmp; + u32 duty, duty100; + u64 tmp64; + struct ci_power_info *pi = ci_get_pi(adev); + + if (adev->pm.no_fan) + return -ENOENT; + + if (pi->fan_is_controlled_by_smc) + return -EINVAL; + + if (speed > 100) + return -EINVAL; + + duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK) + >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT; + + if (duty100 == 0) + return -EINVAL; + + tmp64 = (u64)speed * duty100; + do_div(tmp64, 100); + duty = (u32)tmp64; + + tmp = RREG32_SMC(ixCG_FDO_CTRL0) & ~CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK; + tmp |= duty << CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT; + WREG32_SMC(ixCG_FDO_CTRL0, tmp); + + return 0; +} + +static void ci_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode) +{ + if (mode) { + /* stop auto-manage */ + if (adev->pm.dpm.fan.ucode_fan_control) + ci_fan_ctrl_stop_smc_fan_control(adev); + ci_fan_ctrl_set_static_mode(adev, mode); + } else { + /* restart auto-manage */ + if (adev->pm.dpm.fan.ucode_fan_control) + ci_thermal_start_smc_fan_control(adev); + else + ci_fan_ctrl_set_default_mode(adev); + } +} + +static u32 ci_dpm_get_fan_control_mode(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + u32 tmp; + + if (pi->fan_is_controlled_by_smc) + return 0; + + tmp = RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK; + return 
(tmp >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT); +} + +#if 0 +static int ci_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev, + u32 *speed) +{ + u32 tach_period; + u32 xclk = amdgpu_asic_get_xclk(adev); + + if (adev->pm.no_fan) + return -ENOENT; + + if (adev->pm.fan_pulses_per_revolution == 0) + return -ENOENT; + + tach_period = (RREG32_SMC(ixCG_TACH_STATUS) & CG_TACH_STATUS__TACH_PERIOD_MASK) + >> CG_TACH_STATUS__TACH_PERIOD__SHIFT; + if (tach_period == 0) + return -ENOENT; + + *speed = 60 * xclk * 10000 / tach_period; + + return 0; +} + +static int ci_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev, + u32 speed) +{ + u32 tach_period, tmp; + u32 xclk = amdgpu_asic_get_xclk(adev); + + if (adev->pm.no_fan) + return -ENOENT; + + if (adev->pm.fan_pulses_per_revolution == 0) + return -ENOENT; + + if ((speed < adev->pm.fan_min_rpm) || + (speed > adev->pm.fan_max_rpm)) + return -EINVAL; + + if (adev->pm.dpm.fan.ucode_fan_control) + ci_fan_ctrl_stop_smc_fan_control(adev); + + tach_period = 60 * xclk * 10000 / (8 * speed); + tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__TARGET_PERIOD_MASK; + tmp |= tach_period << CG_TACH_CTRL__TARGET_PERIOD__SHIFT; + WREG32_SMC(CG_TACH_CTRL, tmp); + + ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM); + + return 0; +} +#endif + +static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + u32 tmp; + + if (!pi->fan_ctrl_is_in_default_mode) { + tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK; + tmp |= pi->fan_ctrl_default_mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT; + WREG32_SMC(ixCG_FDO_CTRL2, tmp); + + tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK; + tmp |= pi->t_min << CG_FDO_CTRL2__TMIN__SHIFT; + WREG32_SMC(ixCG_FDO_CTRL2, tmp); + pi->fan_ctrl_is_in_default_mode = true; + } +} + +static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev) +{ + if (adev->pm.dpm.fan.ucode_fan_control) { + ci_fan_ctrl_start_smc_fan_control(adev); + ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC); + } +} + +static void ci_thermal_initialize(struct amdgpu_device *adev) +{ + u32 tmp; + + if (adev->pm.fan_pulses_per_revolution) { + tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__EDGE_PER_REV_MASK; + tmp |= (adev->pm.fan_pulses_per_revolution - 1) + << CG_TACH_CTRL__EDGE_PER_REV__SHIFT; + WREG32_SMC(ixCG_TACH_CTRL, tmp); + } + + tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK; + tmp |= 0x28 << CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT; + WREG32_SMC(ixCG_FDO_CTRL2, tmp); +} + +static int ci_thermal_start_thermal_controller(struct amdgpu_device *adev) +{ + int ret; + + ci_thermal_initialize(adev); + ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN, CISLANDS_TEMP_RANGE_MAX); + if (ret) + return ret; + ret = ci_thermal_enable_alert(adev, true); + if (ret) + return ret; + if (adev->pm.dpm.fan.ucode_fan_control) { + ret = ci_thermal_setup_fan_table(adev); + if (ret) + return ret; + ci_thermal_start_smc_fan_control(adev); + } + + return 0; +} + +static void ci_thermal_stop_thermal_controller(struct amdgpu_device *adev) +{ + if (!adev->pm.no_fan) + ci_fan_ctrl_set_default_mode(adev); +} + +#if 0 +static int ci_read_smc_soft_register(struct amdgpu_device *adev, + u16 reg_offset, u32 *value) +{ + struct ci_power_info *pi = ci_get_pi(adev); + + return amdgpu_ci_read_smc_sram_dword(adev, + pi->soft_regs_start + reg_offset, + value, pi->sram_end); +} +#endif + +static int ci_write_smc_soft_register(struct amdgpu_device *adev, 
+ u16 reg_offset, u32 value) +{ + struct ci_power_info *pi = ci_get_pi(adev); + + return amdgpu_ci_write_smc_sram_dword(adev, + pi->soft_regs_start + reg_offset, + value, pi->sram_end); +} + +static void ci_init_fps_limits(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + SMU7_Discrete_DpmTable *table = &pi->smc_state_table; + + if (pi->caps_fps) { + u16 tmp; + + tmp = 45; + table->FpsHighT = cpu_to_be16(tmp); + + tmp = 30; + table->FpsLowT = cpu_to_be16(tmp); + } +} + +static int ci_update_sclk_t(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + int ret = 0; + u32 low_sclk_interrupt_t = 0; + + if (pi->caps_sclk_throttle_low_notification) { + low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t); + + ret = amdgpu_ci_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT), + (u8 *)&low_sclk_interrupt_t, + sizeof(u32), pi->sram_end); + + } + + return ret; +} + +static void ci_get_leakage_voltages(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + u16 leakage_id, virtual_voltage_id; + u16 vddc, vddci; + int i; + + pi->vddc_leakage.count = 0; + pi->vddci_leakage.count = 0; + + if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) { + for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) { + virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; + if (amdgpu_atombios_get_voltage_evv(adev, virtual_voltage_id, &vddc) != 0) + continue; + if (vddc != 0 && vddc != virtual_voltage_id) { + pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc; + pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id; + pi->vddc_leakage.count++; + } + } + } else if (amdgpu_atombios_get_leakage_id_from_vbios(adev, &leakage_id) == 0) { + for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) { + virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; + if (amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(adev, &vddc, &vddci, + virtual_voltage_id, + leakage_id) == 0) { + if (vddc != 0 && vddc != virtual_voltage_id) { + pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc; + pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id; + pi->vddc_leakage.count++; + } + if (vddci != 0 && vddci != virtual_voltage_id) { + pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci; + pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id; + pi->vddci_leakage.count++; + } + } + } + } +} + +static void ci_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources) +{ + struct ci_power_info *pi = ci_get_pi(adev); + bool want_thermal_protection; + enum amdgpu_dpm_event_src dpm_event_src; + u32 tmp; + + switch (sources) { + case 0: + default: + want_thermal_protection = false; + break; + case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL): + want_thermal_protection = true; + dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL; + break; + case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL): + want_thermal_protection = true; + dpm_event_src = AMDGPU_DPM_EVENT_SRC_EXTERNAL; + break; + case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL) | + (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL)): + want_thermal_protection = true; + dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL; + break; + } + + if (want_thermal_protection) { +#if 0 + /* XXX: need to figure out how to handle this properly */ + tmp = RREG32_SMC(ixCG_THERMAL_CTRL); + tmp &= DPM_EVENT_SRC_MASK; + tmp |= DPM_EVENT_SRC(dpm_event_src); + WREG32_SMC(ixCG_THERMAL_CTRL, tmp); +#endif + 
+ tmp = RREG32_SMC(ixGENERAL_PWRMGT); + if (pi->thermal_protection) + tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK; + else + tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK; + WREG32_SMC(ixGENERAL_PWRMGT, tmp); + } else { + tmp = RREG32_SMC(ixGENERAL_PWRMGT); + tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK; + WREG32_SMC(ixGENERAL_PWRMGT, tmp); + } +} + +static void ci_enable_auto_throttle_source(struct amdgpu_device *adev, + enum amdgpu_dpm_auto_throttle_src source, + bool enable) +{ + struct ci_power_info *pi = ci_get_pi(adev); + + if (enable) { + if (!(pi->active_auto_throttle_sources & (1 << source))) { + pi->active_auto_throttle_sources |= 1 << source; + ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources); + } + } else { + if (pi->active_auto_throttle_sources & (1 << source)) { + pi->active_auto_throttle_sources &= ~(1 << source); + ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources); + } + } +} + +static void ci_enable_vr_hot_gpio_interrupt(struct amdgpu_device *adev) +{ + if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT) + amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableVRHotGPIOInterrupt); +} + +static int ci_unfreeze_sclk_mclk_dpm(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + PPSMC_Result smc_result; + + if (!pi->need_update_smu7_dpm_table) + return 0; + + if ((!pi->sclk_dpm_key_disabled) && + (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) { + smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_UnfreezeLevel); + if (smc_result != PPSMC_Result_OK) + return -EINVAL; + } + + if ((!pi->mclk_dpm_key_disabled) && + (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { + smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_UnfreezeLevel); + if (smc_result != PPSMC_Result_OK) + return -EINVAL; + } + + pi->need_update_smu7_dpm_table = 0; + return 0; +} + +static int ci_enable_sclk_mclk_dpm(struct amdgpu_device *adev, bool enable) +{ + struct ci_power_info *pi = ci_get_pi(adev); + PPSMC_Result smc_result; + + if (enable) { + if (!pi->sclk_dpm_key_disabled) { + smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Enable); + if (smc_result != PPSMC_Result_OK) + return -EINVAL; + } + + if (!pi->mclk_dpm_key_disabled) { + smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Enable); + if (smc_result != PPSMC_Result_OK) + return -EINVAL; + + WREG32_P(mmMC_SEQ_CNTL_3, MC_SEQ_CNTL_3__CAC_EN_MASK, + ~MC_SEQ_CNTL_3__CAC_EN_MASK); + + WREG32_SMC(ixLCAC_MC0_CNTL, 0x05); + WREG32_SMC(ixLCAC_MC1_CNTL, 0x05); + WREG32_SMC(ixLCAC_CPL_CNTL, 0x100005); + + udelay(10); + + WREG32_SMC(ixLCAC_MC0_CNTL, 0x400005); + WREG32_SMC(ixLCAC_MC1_CNTL, 0x400005); + WREG32_SMC(ixLCAC_CPL_CNTL, 0x500005); + } + } else { + if (!pi->sclk_dpm_key_disabled) { + smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Disable); + if (smc_result != PPSMC_Result_OK) + return -EINVAL; + } + + if (!pi->mclk_dpm_key_disabled) { + smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Disable); + if (smc_result != PPSMC_Result_OK) + return -EINVAL; + } + } + + return 0; +} + +static int ci_start_dpm(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + PPSMC_Result smc_result; + int ret; + u32 tmp; + + tmp = RREG32_SMC(ixGENERAL_PWRMGT); + tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK; + WREG32_SMC(ixGENERAL_PWRMGT, tmp); + + tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL); + tmp |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK; + 
WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp); + + ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000); + + WREG32_P(mmBIF_LNCNT_RESET, 0, ~BIF_LNCNT_RESET__RESET_LNCNT_EN_MASK); + + smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Enable); + if (smc_result != PPSMC_Result_OK) + return -EINVAL; + + ret = ci_enable_sclk_mclk_dpm(adev, true); + if (ret) + return ret; + + if (!pi->pcie_dpm_key_disabled) { + smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Enable); + if (smc_result != PPSMC_Result_OK) + return -EINVAL; + } + + return 0; +} + +static int ci_freeze_sclk_mclk_dpm(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + PPSMC_Result smc_result; + + if (!pi->need_update_smu7_dpm_table) + return 0; + + if ((!pi->sclk_dpm_key_disabled) && + (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) { + smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_FreezeLevel); + if (smc_result != PPSMC_Result_OK) + return -EINVAL; + } + + if ((!pi->mclk_dpm_key_disabled) && + (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { + smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_FreezeLevel); + if (smc_result != PPSMC_Result_OK) + return -EINVAL; + } + + return 0; +} + +static int ci_stop_dpm(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + PPSMC_Result smc_result; + int ret; + u32 tmp; + + tmp = RREG32_SMC(ixGENERAL_PWRMGT); + tmp &= ~GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK; + WREG32_SMC(ixGENERAL_PWRMGT, tmp); + + tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL); + tmp &= ~SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK; + WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp); + + if (!pi->pcie_dpm_key_disabled) { + smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Disable); + if (smc_result != PPSMC_Result_OK) + return -EINVAL; + } + + ret = ci_enable_sclk_mclk_dpm(adev, false); + if (ret) + return ret; + + smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Disable); + if (smc_result != PPSMC_Result_OK) + return -EINVAL; + + return 0; +} + +static void ci_enable_sclk_control(struct amdgpu_device *adev, bool enable) +{ + u32 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL); + + if (enable) + tmp &= ~SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK; + else + tmp |= SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK; + WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp); +} + +#if 0 +static int ci_notify_hw_of_power_source(struct amdgpu_device *adev, + bool ac_power) +{ + struct ci_power_info *pi = ci_get_pi(adev); + struct amdgpu_cac_tdp_table *cac_tdp_table = + adev->pm.dpm.dyn_state.cac_tdp_table; + u32 power_limit; + + if (ac_power) + power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256); + else + power_limit = (u32)(cac_tdp_table->battery_power_limit * 256); + + ci_set_power_limit(adev, power_limit); + + if (pi->caps_automatic_dc_transition) { + if (ac_power) + amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_RunningOnAC); + else + amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Remove_DC_Clamp); + } + + return 0; +} +#endif + +static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev, + PPSMC_Msg msg, u32 parameter) +{ + WREG32(mmSMC_MSG_ARG_0, parameter); + return amdgpu_ci_send_msg_to_smc(adev, msg); +} + +static PPSMC_Result amdgpu_ci_send_msg_to_smc_return_parameter(struct amdgpu_device *adev, + PPSMC_Msg msg, u32 *parameter) +{ + PPSMC_Result smc_result; + + smc_result = amdgpu_ci_send_msg_to_smc(adev, msg); + + if ((smc_result == 
PPSMC_Result_OK) && parameter) + *parameter = RREG32(mmSMC_MSG_ARG_0); + + return smc_result; +} + +static int ci_dpm_force_state_sclk(struct amdgpu_device *adev, u32 n) +{ + struct ci_power_info *pi = ci_get_pi(adev); + + if (!pi->sclk_dpm_key_disabled) { + PPSMC_Result smc_result = + amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n); + if (smc_result != PPSMC_Result_OK) + return -EINVAL; + } + + return 0; +} + +static int ci_dpm_force_state_mclk(struct amdgpu_device *adev, u32 n) +{ + struct ci_power_info *pi = ci_get_pi(adev); + + if (!pi->mclk_dpm_key_disabled) { + PPSMC_Result smc_result = + amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n); + if (smc_result != PPSMC_Result_OK) + return -EINVAL; + } + + return 0; +} + +static int ci_dpm_force_state_pcie(struct amdgpu_device *adev, u32 n) +{ + struct ci_power_info *pi = ci_get_pi(adev); + + if (!pi->pcie_dpm_key_disabled) { + PPSMC_Result smc_result = + amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PCIeDPM_ForceLevel, n); + if (smc_result != PPSMC_Result_OK) + return -EINVAL; + } + + return 0; +} + +static int ci_set_power_limit(struct amdgpu_device *adev, u32 n) +{ + struct ci_power_info *pi = ci_get_pi(adev); + + if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) { + PPSMC_Result smc_result = + amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PkgPwrSetLimit, n); + if (smc_result != PPSMC_Result_OK) + return -EINVAL; + } + + return 0; +} + +static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev, + u32 target_tdp) +{ + PPSMC_Result smc_result = + amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp); + if (smc_result != PPSMC_Result_OK) + return -EINVAL; + return 0; +} + +#if 0 +static int ci_set_boot_state(struct amdgpu_device *adev) +{ + return ci_enable_sclk_mclk_dpm(adev, false); +} +#endif + +static u32 ci_get_average_sclk_freq(struct amdgpu_device *adev) +{ + u32 sclk_freq; + PPSMC_Result smc_result = + amdgpu_ci_send_msg_to_smc_return_parameter(adev, + PPSMC_MSG_API_GetSclkFrequency, + &sclk_freq); + if (smc_result != PPSMC_Result_OK) + sclk_freq = 0; + + return sclk_freq; +} + +static u32 ci_get_average_mclk_freq(struct amdgpu_device *adev) +{ + u32 mclk_freq; + PPSMC_Result smc_result = + amdgpu_ci_send_msg_to_smc_return_parameter(adev, + PPSMC_MSG_API_GetMclkFrequency, + &mclk_freq); + if (smc_result != PPSMC_Result_OK) + mclk_freq = 0; + + return mclk_freq; +} + +static void ci_dpm_start_smc(struct amdgpu_device *adev) +{ + int i; + + amdgpu_ci_program_jump_on_start(adev); + amdgpu_ci_start_smc_clock(adev); + amdgpu_ci_start_smc(adev); + for (i = 0; i < adev->usec_timeout; i++) { + if (RREG32_SMC(ixFIRMWARE_FLAGS) & FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) + break; + } +} + +static void ci_dpm_stop_smc(struct amdgpu_device *adev) +{ + amdgpu_ci_reset_smc(adev); + amdgpu_ci_stop_smc_clock(adev); +} + +static int ci_process_firmware_header(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + u32 tmp; + int ret; + + ret = amdgpu_ci_read_smc_sram_dword(adev, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU7_Firmware_Header, DpmTable), + &tmp, pi->sram_end); + if (ret) + return ret; + + pi->dpm_table_start = tmp; + + ret = amdgpu_ci_read_smc_sram_dword(adev, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU7_Firmware_Header, SoftRegisters), + &tmp, pi->sram_end); + if (ret) + return ret; + + pi->soft_regs_start = tmp; + + ret = 
amdgpu_ci_read_smc_sram_dword(adev, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU7_Firmware_Header, mcRegisterTable), + &tmp, pi->sram_end); + if (ret) + return ret; + + pi->mc_reg_table_start = tmp; + + ret = amdgpu_ci_read_smc_sram_dword(adev, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU7_Firmware_Header, FanTable), + &tmp, pi->sram_end); + if (ret) + return ret; + + pi->fan_table_start = tmp; + + ret = amdgpu_ci_read_smc_sram_dword(adev, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU7_Firmware_Header, mcArbDramTimingTable), + &tmp, pi->sram_end); + if (ret) + return ret; + + pi->arb_table_start = tmp; + + return 0; +} + +static void ci_read_clock_registers(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + + pi->clock_registers.cg_spll_func_cntl = + RREG32_SMC(ixCG_SPLL_FUNC_CNTL); + pi->clock_registers.cg_spll_func_cntl_2 = + RREG32_SMC(ixCG_SPLL_FUNC_CNTL_2); + pi->clock_registers.cg_spll_func_cntl_3 = + RREG32_SMC(ixCG_SPLL_FUNC_CNTL_3); + pi->clock_registers.cg_spll_func_cntl_4 = + RREG32_SMC(ixCG_SPLL_FUNC_CNTL_4); + pi->clock_registers.cg_spll_spread_spectrum = + RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM); + pi->clock_registers.cg_spll_spread_spectrum_2 = + RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM_2); + pi->clock_registers.dll_cntl = RREG32(mmDLL_CNTL); + pi->clock_registers.mclk_pwrmgt_cntl = RREG32(mmMCLK_PWRMGT_CNTL); + pi->clock_registers.mpll_ad_func_cntl = RREG32(mmMPLL_AD_FUNC_CNTL); + pi->clock_registers.mpll_dq_func_cntl = RREG32(mmMPLL_DQ_FUNC_CNTL); + pi->clock_registers.mpll_func_cntl = RREG32(mmMPLL_FUNC_CNTL); + pi->clock_registers.mpll_func_cntl_1 = RREG32(mmMPLL_FUNC_CNTL_1); + pi->clock_registers.mpll_func_cntl_2 = RREG32(mmMPLL_FUNC_CNTL_2); + pi->clock_registers.mpll_ss1 = RREG32(mmMPLL_SS1); + pi->clock_registers.mpll_ss2 = RREG32(mmMPLL_SS2); +} + +static void ci_init_sclk_t(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + + pi->low_sclk_interrupt_t = 0; +} + +static void ci_enable_thermal_protection(struct amdgpu_device *adev, + bool enable) +{ + u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT); + + if (enable) + tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK; + else + tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK; + WREG32_SMC(ixGENERAL_PWRMGT, tmp); +} + +static void ci_enable_acpi_power_management(struct amdgpu_device *adev) +{ + u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT); + + tmp |= GENERAL_PWRMGT__STATIC_PM_EN_MASK; + + WREG32_SMC(ixGENERAL_PWRMGT, tmp); +} + +#if 0 +static int ci_enter_ulp_state(struct amdgpu_device *adev) +{ + + WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower); + + udelay(25000); + + return 0; +} + +static int ci_exit_ulp_state(struct amdgpu_device *adev) +{ + int i; + + WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower); + + udelay(7000); + + for (i = 0; i < adev->usec_timeout; i++) { + if (RREG32(mmSMC_RESP_0) == 1) + break; + udelay(1000); + } + + return 0; +} +#endif + +static int ci_notify_smc_display_change(struct amdgpu_device *adev, + bool has_display) +{ + PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay; + + return (amdgpu_ci_send_msg_to_smc(adev, msg) == PPSMC_Result_OK) ? 
0 : -EINVAL; +} + +static int ci_enable_ds_master_switch(struct amdgpu_device *adev, + bool enable) +{ + struct ci_power_info *pi = ci_get_pi(adev); + + if (enable) { + if (pi->caps_sclk_ds) { + if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK) + return -EINVAL; + } else { + if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK) + return -EINVAL; + } + } else { + if (pi->caps_sclk_ds) { + if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK) + return -EINVAL; + } + } + + return 0; +} + +static void ci_program_display_gap(struct amdgpu_device *adev) +{ + u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL); + u32 pre_vbi_time_in_us; + u32 frame_time_in_us; + u32 ref_clock = adev->clock.spll.reference_freq; + u32 refresh_rate = amdgpu_dpm_get_vrefresh(adev); + u32 vblank_time = amdgpu_dpm_get_vblank_time(adev); + + tmp &= ~CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK; + if (adev->pm.dpm.new_active_crtc_count > 0) + tmp |= (AMDGPU_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT); + else + tmp |= (AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT); + WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp); + + if (refresh_rate == 0) + refresh_rate = 60; + if (vblank_time == 0xffffffff) + vblank_time = 500; + frame_time_in_us = 1000000 / refresh_rate; + pre_vbi_time_in_us = + frame_time_in_us - 200 - vblank_time; + tmp = pre_vbi_time_in_us * (ref_clock / 100); + + WREG32_SMC(ixCG_DISPLAY_GAP_CNTL2, tmp); + ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64); + ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us)); + + + ci_notify_smc_display_change(adev, (adev->pm.dpm.new_active_crtc_count == 1)); + +} + +static void ci_enable_spread_spectrum(struct amdgpu_device *adev, bool enable) +{ + struct ci_power_info *pi = ci_get_pi(adev); + u32 tmp; + + if (enable) { + if (pi->caps_sclk_ss_support) { + tmp = RREG32_SMC(ixGENERAL_PWRMGT); + tmp |= GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK; + WREG32_SMC(ixGENERAL_PWRMGT, tmp); + } + } else { + tmp = RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM); + tmp &= ~CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK; + WREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM, tmp); + + tmp = RREG32_SMC(ixGENERAL_PWRMGT); + tmp &= ~GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK; + WREG32_SMC(ixGENERAL_PWRMGT, tmp); + } +} + +static void ci_program_sstp(struct amdgpu_device *adev) +{ + WREG32_SMC(ixCG_STATIC_SCREEN_PARAMETER, + ((CISLANDS_SSTU_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_UNIT__SHIFT) | + (CISLANDS_SST_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD__SHIFT))); +} + +static void ci_enable_display_gap(struct amdgpu_device *adev) +{ + u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL); + + tmp &= ~(CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK | + CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG_MASK); + tmp |= ((AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT) | + (AMDGPU_PM_DISPLAY_GAP_VBLANK << CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG__SHIFT)); + + WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp); +} + +static void ci_program_vc(struct amdgpu_device *adev) +{ + u32 tmp; + + tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL); + tmp &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK); + WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp); + + WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, CISLANDS_VRC_DFLT0); + WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, CISLANDS_VRC_DFLT1); + WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, 
CISLANDS_VRC_DFLT2); + WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, CISLANDS_VRC_DFLT3); + WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, CISLANDS_VRC_DFLT4); + WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, CISLANDS_VRC_DFLT5); + WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, CISLANDS_VRC_DFLT6); + WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, CISLANDS_VRC_DFLT7); +} + +static void ci_clear_vc(struct amdgpu_device *adev) +{ + u32 tmp; + + tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL); + tmp |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK); + WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp); + + WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0); + WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, 0); + WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, 0); + WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, 0); + WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, 0); + WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, 0); + WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, 0); + WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, 0); +} + +static int ci_upload_firmware(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + int i, ret; + + for (i = 0; i < adev->usec_timeout; i++) { + if (RREG32_SMC(ixRCU_UC_EVENTS) & RCU_UC_EVENTS__boot_seq_done_MASK) + break; + } + WREG32_SMC(ixSMC_SYSCON_MISC_CNTL, 1); + + amdgpu_ci_stop_smc_clock(adev); + amdgpu_ci_reset_smc(adev); + + ret = amdgpu_ci_load_smc_ucode(adev, pi->sram_end); + + return ret; + +} + +static int ci_get_svi2_voltage_table(struct amdgpu_device *adev, + struct amdgpu_clock_voltage_dependency_table *voltage_dependency_table, + struct atom_voltage_table *voltage_table) +{ + u32 i; + + if (voltage_dependency_table == NULL) + return -EINVAL; + + voltage_table->mask_low = 0; + voltage_table->phase_delay = 0; + + voltage_table->count = voltage_dependency_table->count; + for (i = 0; i < voltage_table->count; i++) { + voltage_table->entries[i].value = voltage_dependency_table->entries[i].v; + voltage_table->entries[i].smio_low = 0; + } + + return 0; +} + +static int ci_construct_voltage_tables(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + int ret; + + if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) { + ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC, + VOLTAGE_OBJ_GPIO_LUT, + &pi->vddc_voltage_table); + if (ret) + return ret; + } else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) { + ret = ci_get_svi2_voltage_table(adev, + &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk, + &pi->vddc_voltage_table); + if (ret) + return ret; + } + + if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC) + ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDC, + &pi->vddc_voltage_table); + + if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) { + ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDCI, + VOLTAGE_OBJ_GPIO_LUT, + &pi->vddci_voltage_table); + if (ret) + return ret; + } else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) { + ret = ci_get_svi2_voltage_table(adev, + &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk, + &pi->vddci_voltage_table); + if (ret) + return ret; + } + + if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI) + ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDCI, + &pi->vddci_voltage_table); + + if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) { + ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_MVDDC, + VOLTAGE_OBJ_GPIO_LUT, + &pi->mvdd_voltage_table); + if (ret) + return ret; + } else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) { + ret = ci_get_svi2_voltage_table(adev, + 
&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk, + &pi->mvdd_voltage_table); + if (ret) + return ret; + } + + if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD) + ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_MVDD, + &pi->mvdd_voltage_table); + + return 0; +} + +static void ci_populate_smc_voltage_table(struct amdgpu_device *adev, + struct atom_voltage_table_entry *voltage_table, + SMU7_Discrete_VoltageLevel *smc_voltage_table) +{ + int ret; + + ret = ci_get_std_voltage_value_sidd(adev, voltage_table, + &smc_voltage_table->StdVoltageHiSidd, + &smc_voltage_table->StdVoltageLoSidd); + + if (ret) { + smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE; + smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE; + } + + smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE); + smc_voltage_table->StdVoltageHiSidd = + cpu_to_be16(smc_voltage_table->StdVoltageHiSidd); + smc_voltage_table->StdVoltageLoSidd = + cpu_to_be16(smc_voltage_table->StdVoltageLoSidd); +} + +static int ci_populate_smc_vddc_table(struct amdgpu_device *adev, + SMU7_Discrete_DpmTable *table) +{ + struct ci_power_info *pi = ci_get_pi(adev); + unsigned int count; + + table->VddcLevelCount = pi->vddc_voltage_table.count; + for (count = 0; count < table->VddcLevelCount; count++) { + ci_populate_smc_voltage_table(adev, + &pi->vddc_voltage_table.entries[count], + &table->VddcLevel[count]); + + if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) + table->VddcLevel[count].Smio |= + pi->vddc_voltage_table.entries[count].smio_low; + else + table->VddcLevel[count].Smio = 0; + } + table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount); + + return 0; +} + +static int ci_populate_smc_vddci_table(struct amdgpu_device *adev, + SMU7_Discrete_DpmTable *table) +{ + unsigned int count; + struct ci_power_info *pi = ci_get_pi(adev); + + table->VddciLevelCount = pi->vddci_voltage_table.count; + for (count = 0; count < table->VddciLevelCount; count++) { + ci_populate_smc_voltage_table(adev, + &pi->vddci_voltage_table.entries[count], + &table->VddciLevel[count]); + + if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) + table->VddciLevel[count].Smio |= + pi->vddci_voltage_table.entries[count].smio_low; + else + table->VddciLevel[count].Smio = 0; + } + table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount); + + return 0; +} + +static int ci_populate_smc_mvdd_table(struct amdgpu_device *adev, + SMU7_Discrete_DpmTable *table) +{ + struct ci_power_info *pi = ci_get_pi(adev); + unsigned int count; + + table->MvddLevelCount = pi->mvdd_voltage_table.count; + for (count = 0; count < table->MvddLevelCount; count++) { + ci_populate_smc_voltage_table(adev, + &pi->mvdd_voltage_table.entries[count], + &table->MvddLevel[count]); + + if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) + table->MvddLevel[count].Smio |= + pi->mvdd_voltage_table.entries[count].smio_low; + else + table->MvddLevel[count].Smio = 0; + } + table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount); + + return 0; +} + +static int ci_populate_smc_voltage_tables(struct amdgpu_device *adev, + SMU7_Discrete_DpmTable *table) +{ + int ret; + + ret = ci_populate_smc_vddc_table(adev, table); + if (ret) + return ret; + + ret = ci_populate_smc_vddci_table(adev, table); + if (ret) + return ret; + + ret = ci_populate_smc_mvdd_table(adev, table); + if (ret) + return ret; + + return 0; +} + +static int ci_populate_mvdd_value(struct amdgpu_device *adev, u32 mclk, + 
SMU7_Discrete_VoltageLevel *voltage) +{ + struct ci_power_info *pi = ci_get_pi(adev); + u32 i = 0; + + if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) { + for (i = 0; i < adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) { + if (mclk <= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) { + voltage->Voltage = pi->mvdd_voltage_table.entries[i].value; + break; + } + } + + if (i >= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count) + return -EINVAL; + } + + return -EINVAL; +} + +static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev, + struct atom_voltage_table_entry *voltage_table, + u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd) +{ + u16 v_index, idx; + bool voltage_found = false; + *std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE; + *std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE; + + if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL) + return -EINVAL; + + if (adev->pm.dpm.dyn_state.cac_leakage_table.entries) { + for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) { + if (voltage_table->value == + adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) { + voltage_found = true; + if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count) + idx = v_index; + else + idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1; + *std_voltage_lo_sidd = + adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE; + *std_voltage_hi_sidd = + adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE; + break; + } + } + + if (!voltage_found) { + for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) { + if (voltage_table->value <= + adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) { + voltage_found = true; + if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count) + idx = v_index; + else + idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1; + *std_voltage_lo_sidd = + adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE; + *std_voltage_hi_sidd = + adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE; + break; + } + } + } + } + + return 0; +} + +static void ci_populate_phase_value_based_on_sclk(struct amdgpu_device *adev, + const struct amdgpu_phase_shedding_limits_table *limits, + u32 sclk, + u32 *phase_shedding) +{ + unsigned int i; + + *phase_shedding = 1; + + for (i = 0; i < limits->count; i++) { + if (sclk < limits->entries[i].sclk) { + *phase_shedding = i; + break; + } + } +} + +static void ci_populate_phase_value_based_on_mclk(struct amdgpu_device *adev, + const struct amdgpu_phase_shedding_limits_table *limits, + u32 mclk, + u32 *phase_shedding) +{ + unsigned int i; + + *phase_shedding = 1; + + for (i = 0; i < limits->count; i++) { + if (mclk < limits->entries[i].mclk) { + *phase_shedding = i; + break; + } + } +} + +static int ci_init_arb_table_index(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + u32 tmp; + int ret; + + ret = amdgpu_ci_read_smc_sram_dword(adev, pi->arb_table_start, + &tmp, pi->sram_end); + if (ret) + return ret; + + tmp &= 0x00FFFFFF; + tmp |= MC_CG_ARB_FREQ_F1 << 24; + + return amdgpu_ci_write_smc_sram_dword(adev, pi->arb_table_start, + tmp, pi->sram_end); +} + +static int ci_get_dependency_volt_by_clk(struct amdgpu_device *adev, + struct amdgpu_clock_voltage_dependency_table *allowed_clock_voltage_table, + u32 clock, u32 *voltage) +{ + 
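+	/* Pick the voltage of the first table entry whose clock is >= the
+	 * requested clock; if the clock is above every entry, fall back to
+	 * the last (highest) entry below. */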
u32 i = 0; + + if (allowed_clock_voltage_table->count == 0) + return -EINVAL; + + for (i = 0; i < allowed_clock_voltage_table->count; i++) { + if (allowed_clock_voltage_table->entries[i].clk >= clock) { + *voltage = allowed_clock_voltage_table->entries[i].v; + return 0; + } + } + + *voltage = allowed_clock_voltage_table->entries[i-1].v; + + return 0; +} + +static u8 ci_get_sleep_divider_id_from_clock(struct amdgpu_device *adev, + u32 sclk, u32 min_sclk_in_sr) +{ + u32 i; + u32 tmp; + u32 min = (min_sclk_in_sr > CISLAND_MINIMUM_ENGINE_CLOCK) ? + min_sclk_in_sr : CISLAND_MINIMUM_ENGINE_CLOCK; + + if (sclk < min) + return 0; + + for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) { + tmp = sclk / (1 << i); + if (tmp >= min || i == 0) + break; + } + + return (u8)i; +} + +static int ci_initial_switch_from_arb_f0_to_f1(struct amdgpu_device *adev) +{ + return ci_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1); +} + +static int ci_reset_to_default(struct amdgpu_device *adev) +{ + return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ? + 0 : -EINVAL; +} + +static int ci_force_switch_to_arb_f0(struct amdgpu_device *adev) +{ + u32 tmp; + + tmp = (RREG32_SMC(ixSMC_SCRATCH9) & 0x0000ff00) >> 8; + + if (tmp == MC_CG_ARB_FREQ_F0) + return 0; + + return ci_copy_and_switch_arb_sets(adev, tmp, MC_CG_ARB_FREQ_F0); +} + +static void ci_register_patching_mc_arb(struct amdgpu_device *adev, + const u32 engine_clock, + const u32 memory_clock, + u32 *dram_timimg2) +{ + bool patch; + u32 tmp, tmp2; + + tmp = RREG32(mmMC_SEQ_MISC0); + patch = ((tmp & 0x0000f00) == 0x300) ? true : false; + + if (patch && + ((adev->pdev->device == 0x67B0) || + (adev->pdev->device == 0x67B1))) { + if ((memory_clock > 100000) && (memory_clock <= 125000)) { + tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff; + *dram_timimg2 &= ~0x00ff0000; + *dram_timimg2 |= tmp2 << 16; + } else if ((memory_clock > 125000) && (memory_clock <= 137500)) { + tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff; + *dram_timimg2 &= ~0x00ff0000; + *dram_timimg2 |= tmp2 << 16; + } + } +} + +static int ci_populate_memory_timing_parameters(struct amdgpu_device *adev, + u32 sclk, + u32 mclk, + SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs) +{ + u32 dram_timing; + u32 dram_timing2; + u32 burst_time; + + amdgpu_atombios_set_engine_dram_timings(adev, sclk, mclk); + + dram_timing = RREG32(mmMC_ARB_DRAM_TIMING); + dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2); + burst_time = RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK; + + ci_register_patching_mc_arb(adev, sclk, mclk, &dram_timing2); + + arb_regs->McArbDramTiming = cpu_to_be32(dram_timing); + arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2); + arb_regs->McArbBurstTime = (u8)burst_time; + + return 0; +} + +static int ci_do_program_memory_timing_parameters(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + SMU7_Discrete_MCArbDramTimingTable arb_regs; + u32 i, j; + int ret = 0; + + memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable)); + + for (i = 0; i < pi->dpm_table.sclk_table.count; i++) { + for (j = 0; j < pi->dpm_table.mclk_table.count; j++) { + ret = ci_populate_memory_timing_parameters(adev, + pi->dpm_table.sclk_table.dpm_levels[i].value, + pi->dpm_table.mclk_table.dpm_levels[j].value, + &arb_regs.entries[i][j]); + if (ret) + break; + } + } + + if (ret == 0) + ret = amdgpu_ci_copy_bytes_to_smc(adev, + pi->arb_table_start, + (u8 *)&arb_regs, + sizeof(SMU7_Discrete_MCArbDramTimingTable), + 
pi->sram_end); + + return ret; +} + +static int ci_program_memory_timing_parameters(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + + if (pi->need_update_smu7_dpm_table == 0) + return 0; + + return ci_do_program_memory_timing_parameters(adev); +} + +static void ci_populate_smc_initial_state(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_boot_state) +{ + struct ci_ps *boot_state = ci_get_ps(amdgpu_boot_state); + struct ci_power_info *pi = ci_get_pi(adev); + u32 level = 0; + + for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) { + if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >= + boot_state->performance_levels[0].sclk) { + pi->smc_state_table.GraphicsBootLevel = level; + break; + } + } + + for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) { + if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >= + boot_state->performance_levels[0].mclk) { + pi->smc_state_table.MemoryBootLevel = level; + break; + } + } +} + +static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table) +{ + u32 i; + u32 mask_value = 0; + + for (i = dpm_table->count; i > 0; i--) { + mask_value = mask_value << 1; + if (dpm_table->dpm_levels[i-1].enabled) + mask_value |= 0x1; + else + mask_value &= 0xFFFFFFFE; + } + + return mask_value; +} + +static void ci_populate_smc_link_level(struct amdgpu_device *adev, + SMU7_Discrete_DpmTable *table) +{ + struct ci_power_info *pi = ci_get_pi(adev); + struct ci_dpm_table *dpm_table = &pi->dpm_table; + u32 i; + + for (i = 0; i < dpm_table->pcie_speed_table.count; i++) { + table->LinkLevel[i].PcieGenSpeed = + (u8)dpm_table->pcie_speed_table.dpm_levels[i].value; + table->LinkLevel[i].PcieLaneCount = + amdgpu_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1); + table->LinkLevel[i].EnabledForActivity = 1; + table->LinkLevel[i].DownT = cpu_to_be32(5); + table->LinkLevel[i].UpT = cpu_to_be32(30); + } + + pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count; + pi->dpm_level_enable_mask.pcie_dpm_enable_mask = + ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table); +} + +static int ci_populate_smc_uvd_level(struct amdgpu_device *adev, + SMU7_Discrete_DpmTable *table) +{ + u32 count; + struct atom_clock_dividers dividers; + int ret = -EINVAL; + + table->UvdLevelCount = + adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count; + + for (count = 0; count < table->UvdLevelCount; count++) { + table->UvdLevel[count].VclkFrequency = + adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk; + table->UvdLevel[count].DclkFrequency = + adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk; + table->UvdLevel[count].MinVddc = + adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE; + table->UvdLevel[count].MinVddcPhases = 1; + + ret = amdgpu_atombios_get_clock_dividers(adev, + COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, + table->UvdLevel[count].VclkFrequency, false, &dividers); + if (ret) + return ret; + + table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider; + + ret = amdgpu_atombios_get_clock_dividers(adev, + COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, + table->UvdLevel[count].DclkFrequency, false, &dividers); + if (ret) + return ret; + + table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider; + + table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency); +
table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency); + table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc); + } + + return ret; +} + +static int ci_populate_smc_vce_level(struct amdgpu_device *adev, + SMU7_Discrete_DpmTable *table) +{ + u32 count; + struct atom_clock_dividers dividers; + int ret = -EINVAL; + + table->VceLevelCount = + adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count; + + for (count = 0; count < table->VceLevelCount; count++) { + table->VceLevel[count].Frequency = + adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk; + table->VceLevel[count].MinVoltage = + (u16)adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE; + table->VceLevel[count].MinPhases = 1; + + ret = amdgpu_atombios_get_clock_dividers(adev, + COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, + table->VceLevel[count].Frequency, false, &dividers); + if (ret) + return ret; + + table->VceLevel[count].Divider = (u8)dividers.post_divider; + + table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency); + table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage); + } + + return ret; + +} + +static int ci_populate_smc_acp_level(struct amdgpu_device *adev, + SMU7_Discrete_DpmTable *table) +{ + u32 count; + struct atom_clock_dividers dividers; + int ret = -EINVAL; + + table->AcpLevelCount = (u8) + (adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count); + + for (count = 0; count < table->AcpLevelCount; count++) { + table->AcpLevel[count].Frequency = + adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk; + table->AcpLevel[count].MinVoltage = + adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v; + table->AcpLevel[count].MinPhases = 1; + + ret = amdgpu_atombios_get_clock_dividers(adev, + COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, + table->AcpLevel[count].Frequency, false, &dividers); + if (ret) + return ret; + + table->AcpLevel[count].Divider = (u8)dividers.post_divider; + + table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency); + table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage); + } + + return ret; +} + +static int ci_populate_smc_samu_level(struct amdgpu_device *adev, + SMU7_Discrete_DpmTable *table) +{ + u32 count; + struct atom_clock_dividers dividers; + int ret = -EINVAL; + + table->SamuLevelCount = + adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count; + + for (count = 0; count < table->SamuLevelCount; count++) { + table->SamuLevel[count].Frequency = + adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk; + table->SamuLevel[count].MinVoltage = + adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE; + table->SamuLevel[count].MinPhases = 1; + + ret = amdgpu_atombios_get_clock_dividers(adev, + COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, + table->SamuLevel[count].Frequency, false, &dividers); + if (ret) + return ret; + + table->SamuLevel[count].Divider = (u8)dividers.post_divider; + + table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency); + table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage); + } + + return ret; +} + +static int ci_calculate_mclk_params(struct amdgpu_device *adev, + u32 memory_clock, + SMU7_Discrete_MemoryLevel *mclk, + bool strobe_mode, + bool dll_state_on) +{ + struct ci_power_info *pi = 
ci_get_pi(adev); + u32 dll_cntl = pi->clock_registers.dll_cntl; + u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl; + u32 mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl; + u32 mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl; + u32 mpll_func_cntl = pi->clock_registers.mpll_func_cntl; + u32 mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1; + u32 mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2; + u32 mpll_ss1 = pi->clock_registers.mpll_ss1; + u32 mpll_ss2 = pi->clock_registers.mpll_ss2; + struct atom_mpll_param mpll_param; + int ret; + + ret = amdgpu_atombios_get_memory_pll_dividers(adev, memory_clock, strobe_mode, &mpll_param); + if (ret) + return ret; + + mpll_func_cntl &= ~MPLL_FUNC_CNTL__BWCTRL_MASK; + mpll_func_cntl |= (mpll_param.bwcntl << MPLL_FUNC_CNTL__BWCTRL__SHIFT); + + mpll_func_cntl_1 &= ~(MPLL_FUNC_CNTL_1__CLKF_MASK | MPLL_FUNC_CNTL_1__CLKFRAC_MASK | + MPLL_FUNC_CNTL_1__VCO_MODE_MASK); + mpll_func_cntl_1 |= (mpll_param.clkf) << MPLL_FUNC_CNTL_1__CLKF__SHIFT | + (mpll_param.clkfrac << MPLL_FUNC_CNTL_1__CLKFRAC__SHIFT) | + (mpll_param.vco_mode << MPLL_FUNC_CNTL_1__VCO_MODE__SHIFT); + + mpll_ad_func_cntl &= ~MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK; + mpll_ad_func_cntl |= (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT); + + if (adev->mc.is_gddr5) { + mpll_dq_func_cntl &= ~(MPLL_DQ_FUNC_CNTL__YCLK_SEL_MASK | + MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK); + mpll_dq_func_cntl |= (mpll_param.yclk_sel << MPLL_DQ_FUNC_CNTL__YCLK_SEL__SHIFT) | + (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT); + } + + if (pi->caps_mclk_ss_support) { + struct amdgpu_atom_ss ss; + u32 freq_nom; + u32 tmp; + u32 reference_clock = adev->clock.mpll.reference_freq; + + if (mpll_param.qdr == 1) + freq_nom = memory_clock * 4 * (1 << mpll_param.post_div); + else + freq_nom = memory_clock * 2 * (1 << mpll_param.post_div); + + tmp = (freq_nom / reference_clock); + tmp = tmp * tmp; + if (amdgpu_atombios_get_asic_ss_info(adev, &ss, + ASIC_INTERNAL_MEMORY_SS, freq_nom)) { + u32 clks = reference_clock * 5 / ss.rate; + u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom); + + mpll_ss1 &= ~MPLL_SS1__CLKV_MASK; + mpll_ss1 |= (clkv << MPLL_SS1__CLKV__SHIFT); + + mpll_ss2 &= ~MPLL_SS2__CLKS_MASK; + mpll_ss2 |= (clks << MPLL_SS2__CLKS__SHIFT); + } + } + + mclk_pwrmgt_cntl &= ~MCLK_PWRMGT_CNTL__DLL_SPEED_MASK; + mclk_pwrmgt_cntl |= (mpll_param.dll_speed << MCLK_PWRMGT_CNTL__DLL_SPEED__SHIFT); + + if (dll_state_on) + mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK | + MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK; + else + mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK | + MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK); + + mclk->MclkFrequency = memory_clock; + mclk->MpllFuncCntl = mpll_func_cntl; + mclk->MpllFuncCntl_1 = mpll_func_cntl_1; + mclk->MpllFuncCntl_2 = mpll_func_cntl_2; + mclk->MpllAdFuncCntl = mpll_ad_func_cntl; + mclk->MpllDqFuncCntl = mpll_dq_func_cntl; + mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl; + mclk->DllCntl = dll_cntl; + mclk->MpllSs1 = mpll_ss1; + mclk->MpllSs2 = mpll_ss2; + + return 0; +} + +static int ci_populate_single_memory_level(struct amdgpu_device *adev, + u32 memory_clock, + SMU7_Discrete_MemoryLevel *memory_level) +{ + struct ci_power_info *pi = ci_get_pi(adev); + int ret; + bool dll_state_on; + + if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) { + ret = ci_get_dependency_volt_by_clk(adev, + &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk, + memory_clock, &memory_level->MinVddc); + if (ret) + 
return ret; + } + + if (adev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) { + ret = ci_get_dependency_volt_by_clk(adev, + &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk, + memory_clock, &memory_level->MinVddci); + if (ret) + return ret; + } + + if (adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) { + ret = ci_get_dependency_volt_by_clk(adev, + &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk, + memory_clock, &memory_level->MinMvdd); + if (ret) + return ret; + } + + memory_level->MinVddcPhases = 1; + + if (pi->vddc_phase_shed_control) + ci_populate_phase_value_based_on_mclk(adev, + &adev->pm.dpm.dyn_state.phase_shedding_limits_table, + memory_clock, + &memory_level->MinVddcPhases); + + memory_level->EnabledForThrottle = 1; + memory_level->EnabledForActivity = 1; + memory_level->UpH = 0; + memory_level->DownH = 100; + memory_level->VoltageDownH = 0; + memory_level->ActivityLevel = (u16)pi->mclk_activity_target; + + memory_level->StutterEnable = false; + memory_level->StrobeEnable = false; + memory_level->EdcReadEnable = false; + memory_level->EdcWriteEnable = false; + memory_level->RttEnable = false; + + memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + + if (pi->mclk_stutter_mode_threshold && + (memory_clock <= pi->mclk_stutter_mode_threshold) && + (pi->uvd_enabled == false) && + (RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) && + (adev->pm.dpm.new_active_crtc_count <= 2)) + memory_level->StutterEnable = true; + + if (pi->mclk_strobe_mode_threshold && + (memory_clock <= pi->mclk_strobe_mode_threshold)) + memory_level->StrobeEnable = 1; + + if (adev->mc.is_gddr5) { + memory_level->StrobeRatio = + ci_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable); + if (pi->mclk_edc_enable_threshold && + (memory_clock > pi->mclk_edc_enable_threshold)) + memory_level->EdcReadEnable = true; + + if (pi->mclk_edc_wr_enable_threshold && + (memory_clock > pi->mclk_edc_wr_enable_threshold)) + memory_level->EdcWriteEnable = true; + + if (memory_level->StrobeEnable) { + if (ci_get_mclk_frequency_ratio(memory_clock, true) >= + ((RREG32(mmMC_SEQ_MISC7) >> 16) & 0xf)) + dll_state_on = ((RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1) ? true : false; + else + dll_state_on = ((RREG32(mmMC_SEQ_MISC6) >> 1) & 0x1) ? true : false; + } else { + dll_state_on = pi->dll_default_on; + } + } else { + memory_level->StrobeRatio = ci_get_ddr3_mclk_frequency_ratio(memory_clock); + dll_state_on = ((RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1) ? 
true : false; + } + + ret = ci_calculate_mclk_params(adev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on); + if (ret) + return ret; + + memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE); + memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases); + memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE); + memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE); + + memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency); + memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel); + memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl); + memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1); + memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2); + memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl); + memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl); + memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl); + memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl); + memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1); + memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2); + + return 0; +} + +static int ci_populate_smc_acpi_level(struct amdgpu_device *adev, + SMU7_Discrete_DpmTable *table) +{ + struct ci_power_info *pi = ci_get_pi(adev); + struct atom_clock_dividers dividers; + SMU7_Discrete_VoltageLevel voltage_level; + u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl; + u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2; + u32 dll_cntl = pi->clock_registers.dll_cntl; + u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl; + int ret; + + table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC; + + if (pi->acpi_vddc) + table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE); + else + table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE); + + table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 
0 : 1; + + table->ACPILevel.SclkFrequency = adev->clock.spll.reference_freq; + + ret = amdgpu_atombios_get_clock_dividers(adev, + COMPUTE_GPUCLK_INPUT_FLAG_SCLK, + table->ACPILevel.SclkFrequency, false, &dividers); + if (ret) + return ret; + + table->ACPILevel.SclkDid = (u8)dividers.post_divider; + table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + table->ACPILevel.DeepSleepDivId = 0; + + spll_func_cntl &= ~CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK; + spll_func_cntl |= CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK; + + spll_func_cntl_2 &= ~CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK; + spll_func_cntl_2 |= (4 << CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT); + + table->ACPILevel.CgSpllFuncCntl = spll_func_cntl; + table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2; + table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3; + table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4; + table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum; + table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2; + table->ACPILevel.CcPwrDynRm = 0; + table->ACPILevel.CcPwrDynRm1 = 0; + + table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags); + table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases); + table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency); + table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl); + table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2); + table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3); + table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4); + table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum); + table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2); + table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm); + table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1); + + table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc; + table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases; + + if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) { + if (pi->acpi_vddci) + table->MemoryACPILevel.MinVddci = + cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE); + else + table->MemoryACPILevel.MinVddci = + cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE); + } + + if (ci_populate_mvdd_value(adev, 0, &voltage_level)) + table->MemoryACPILevel.MinMvdd = 0; + else + table->MemoryACPILevel.MinMvdd = + cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE); + + mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_RESET_MASK | + MCLK_PWRMGT_CNTL__MRDCK1_RESET_MASK; + mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK | + MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK); + + dll_cntl &= ~(DLL_CNTL__MRDCK0_BYPASS_MASK | DLL_CNTL__MRDCK1_BYPASS_MASK); + + table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl); + table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl); + table->MemoryACPILevel.MpllAdFuncCntl = + cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl); + table->MemoryACPILevel.MpllDqFuncCntl = + cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl); + table->MemoryACPILevel.MpllFuncCntl = + cpu_to_be32(pi->clock_registers.mpll_func_cntl); + table->MemoryACPILevel.MpllFuncCntl_1 = + cpu_to_be32(pi->clock_registers.mpll_func_cntl_1); + table->MemoryACPILevel.MpllFuncCntl_2 = + cpu_to_be32(pi->clock_registers.mpll_func_cntl_2); +
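+	/* the MPLL spread-spectrum words below are reused verbatim from the
+	 * clock registers captured in ci_read_clock_registers() */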
table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1); + table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2); + + table->MemoryACPILevel.EnabledForThrottle = 0; + table->MemoryACPILevel.EnabledForActivity = 0; + table->MemoryACPILevel.UpH = 0; + table->MemoryACPILevel.DownH = 100; + table->MemoryACPILevel.VoltageDownH = 0; + table->MemoryACPILevel.ActivityLevel = + cpu_to_be16((u16)pi->mclk_activity_target); + + table->MemoryACPILevel.StutterEnable = false; + table->MemoryACPILevel.StrobeEnable = false; + table->MemoryACPILevel.EdcReadEnable = false; + table->MemoryACPILevel.EdcWriteEnable = false; + table->MemoryACPILevel.RttEnable = false; + + return 0; +} + + +static int ci_enable_ulv(struct amdgpu_device *adev, bool enable) +{ + struct ci_power_info *pi = ci_get_pi(adev); + struct ci_ulv_parm *ulv = &pi->ulv; + + if (ulv->supported) { + if (enable) + return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ? + 0 : -EINVAL; + else + return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ? + 0 : -EINVAL; + } + + return 0; +} + +static int ci_populate_ulv_level(struct amdgpu_device *adev, + SMU7_Discrete_Ulv *state) +{ + struct ci_power_info *pi = ci_get_pi(adev); + u16 ulv_voltage = adev->pm.dpm.backbias_response_time; + + state->CcPwrDynRm = 0; + state->CcPwrDynRm1 = 0; + + if (ulv_voltage == 0) { + pi->ulv.supported = false; + return 0; + } + + if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) { + if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v) + state->VddcOffset = 0; + else + state->VddcOffset = + adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage; + } else { + if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v) + state->VddcOffsetVid = 0; + else + state->VddcOffsetVid = (u8) + ((adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) * + VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1); + } + state->VddcPhase = pi->vddc_phase_shed_control ? 
0 : 1; + + state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm); + state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1); + state->VddcOffset = cpu_to_be16(state->VddcOffset); + + return 0; +} + +static int ci_calculate_sclk_params(struct amdgpu_device *adev, + u32 engine_clock, + SMU7_Discrete_GraphicsLevel *sclk) +{ + struct ci_power_info *pi = ci_get_pi(adev); + struct atom_clock_dividers dividers; + u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3; + u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4; + u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum; + u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2; + u32 reference_clock = adev->clock.spll.reference_freq; + u32 reference_divider; + u32 fbdiv; + int ret; + + ret = amdgpu_atombios_get_clock_dividers(adev, + COMPUTE_GPUCLK_INPUT_FLAG_SCLK, + engine_clock, false, &dividers); + if (ret) + return ret; + + reference_divider = 1 + dividers.ref_div; + fbdiv = dividers.fb_div & 0x3FFFFFF; + + spll_func_cntl_3 &= ~CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK; + spll_func_cntl_3 |= (fbdiv << CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT); + spll_func_cntl_3 |= CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK; + + if (pi->caps_sclk_ss_support) { + struct amdgpu_atom_ss ss; + u32 vco_freq = engine_clock * dividers.post_div; + + if (amdgpu_atombios_get_asic_ss_info(adev, &ss, + ASIC_INTERNAL_ENGINE_SS, vco_freq)) { + u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate); + u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000); + + cg_spll_spread_spectrum &= ~(CG_SPLL_SPREAD_SPECTRUM__CLKS_MASK | CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK); + cg_spll_spread_spectrum |= (clk_s << CG_SPLL_SPREAD_SPECTRUM__CLKS__SHIFT); + cg_spll_spread_spectrum |= (1 << CG_SPLL_SPREAD_SPECTRUM__SSEN__SHIFT); + + cg_spll_spread_spectrum_2 &= ~CG_SPLL_SPREAD_SPECTRUM_2__CLKV_MASK; + cg_spll_spread_spectrum_2 |= (clk_v << CG_SPLL_SPREAD_SPECTRUM_2__CLKV__SHIFT); + } + } + + sclk->SclkFrequency = engine_clock; + sclk->CgSpllFuncCntl3 = spll_func_cntl_3; + sclk->CgSpllFuncCntl4 = spll_func_cntl_4; + sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum; + sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2; + sclk->SclkDid = (u8)dividers.post_divider; + + return 0; +} + +static int ci_populate_single_graphic_level(struct amdgpu_device *adev, + u32 engine_clock, + u16 sclk_activity_level_t, + SMU7_Discrete_GraphicsLevel *graphic_level) +{ + struct ci_power_info *pi = ci_get_pi(adev); + int ret; + + ret = ci_calculate_sclk_params(adev, engine_clock, graphic_level); + if (ret) + return ret; + + ret = ci_get_dependency_volt_by_clk(adev, + &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk, + engine_clock, &graphic_level->MinVddc); + if (ret) + return ret; + + graphic_level->SclkFrequency = engine_clock; + + graphic_level->Flags = 0; + graphic_level->MinVddcPhases = 1; + + if (pi->vddc_phase_shed_control) + ci_populate_phase_value_based_on_sclk(adev, + &adev->pm.dpm.dyn_state.phase_shedding_limits_table, + engine_clock, + &graphic_level->MinVddcPhases); + + graphic_level->ActivityLevel = sclk_activity_level_t; + + graphic_level->CcPwrDynRm = 0; + graphic_level->CcPwrDynRm1 = 0; + graphic_level->EnabledForThrottle = 1; + graphic_level->UpH = 0; + graphic_level->DownH = 0; + graphic_level->VoltageDownH = 0; + graphic_level->PowerThrottle = 0; + + if (pi->caps_sclk_ds) + graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(adev, + engine_clock, + CISLAND_MINIMUM_ENGINE_CLOCK); + + graphic_level->DisplayWatermark = 
PPSMC_DISPLAY_WATERMARK_LOW; + + graphic_level->Flags = cpu_to_be32(graphic_level->Flags); + graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE); + graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases); + graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency); + graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel); + graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3); + graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4); + graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum); + graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2); + graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm); + graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1); + graphic_level->EnabledForActivity = 1; + + return 0; +} + +static int ci_populate_all_graphic_levels(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + struct ci_dpm_table *dpm_table = &pi->dpm_table; + u32 level_array_address = pi->dpm_table_start + + offsetof(SMU7_Discrete_DpmTable, GraphicsLevel); + u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) * + SMU7_MAX_LEVELS_GRAPHICS; + SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel; + u32 i, ret; + + memset(levels, 0, level_array_size); + + for (i = 0; i < dpm_table->sclk_table.count; i++) { + ret = ci_populate_single_graphic_level(adev, + dpm_table->sclk_table.dpm_levels[i].value, + (u16)pi->activity_target[i], + &pi->smc_state_table.GraphicsLevel[i]); + if (ret) + return ret; + if (i > 1) + pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0; + if (i == (dpm_table->sclk_table.count - 1)) + pi->smc_state_table.GraphicsLevel[i].DisplayWatermark = + PPSMC_DISPLAY_WATERMARK_HIGH; + } + + pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count; + pi->dpm_level_enable_mask.sclk_dpm_enable_mask = + ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table); + + ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address, + (u8 *)levels, level_array_size, + pi->sram_end); + if (ret) + return ret; + + return 0; +} + +static int ci_populate_ulv_state(struct amdgpu_device *adev, + SMU7_Discrete_Ulv *ulv_level) +{ + return ci_populate_ulv_level(adev, ulv_level); +} + +static int ci_populate_all_memory_levels(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + struct ci_dpm_table *dpm_table = &pi->dpm_table; + u32 level_array_address = pi->dpm_table_start + + offsetof(SMU7_Discrete_DpmTable, MemoryLevel); + u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) * + SMU7_MAX_LEVELS_MEMORY; + SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel; + u32 i, ret; + + memset(levels, 0, level_array_size); + + for (i = 0; i < dpm_table->mclk_table.count; i++) { + if (dpm_table->mclk_table.dpm_levels[i].value == 0) + return -EINVAL; + ret = ci_populate_single_memory_level(adev, + dpm_table->mclk_table.dpm_levels[i].value, + &pi->smc_state_table.MemoryLevel[i]); + if (ret) + return ret; + } + + if ((dpm_table->mclk_table.count >= 2) && + ((adev->pdev->device == 0x67B0) || (adev->pdev->device == 0x67B1))) { + pi->smc_state_table.MemoryLevel[1].MinVddc = + pi->smc_state_table.MemoryLevel[0].MinVddc; + pi->smc_state_table.MemoryLevel[1].MinVddcPhases = + pi->smc_state_table.MemoryLevel[0].MinVddcPhases; + } + + pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F); 
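+	/* finish the table below: level count, enable mask, a high watermark
+	 * on the top MCLK level, then copy all levels into SMC RAM */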
+ + pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count; + pi->dpm_level_enable_mask.mclk_dpm_enable_mask = + ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table); + + pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark = + PPSMC_DISPLAY_WATERMARK_HIGH; + + ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address, + (u8 *)levels, level_array_size, + pi->sram_end); + if (ret) + return ret; + + return 0; +} + +static void ci_reset_single_dpm_table(struct amdgpu_device *adev, + struct ci_single_dpm_table* dpm_table, + u32 count) +{ + u32 i; + + dpm_table->count = count; + for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++) + dpm_table->dpm_levels[i].enabled = false; +} + +static void ci_setup_pcie_table_entry(struct ci_single_dpm_table* dpm_table, + u32 index, u32 pcie_gen, u32 pcie_lanes) +{ + dpm_table->dpm_levels[index].value = pcie_gen; + dpm_table->dpm_levels[index].param1 = pcie_lanes; + dpm_table->dpm_levels[index].enabled = true; +} + +static int ci_setup_default_pcie_tables(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + + if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) + return -EINVAL; + + if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) { + pi->pcie_gen_powersaving = pi->pcie_gen_performance; + pi->pcie_lane_powersaving = pi->pcie_lane_performance; + } else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) { + pi->pcie_gen_performance = pi->pcie_gen_powersaving; + pi->pcie_lane_performance = pi->pcie_lane_powersaving; + } + + ci_reset_single_dpm_table(adev, + &pi->dpm_table.pcie_speed_table, + SMU7_MAX_LEVELS_LINK); + + if (adev->asic_type == CHIP_BONAIRE) + ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0, + pi->pcie_gen_powersaving.min, + pi->pcie_lane_powersaving.max); + else + ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0, + pi->pcie_gen_powersaving.min, + pi->pcie_lane_powersaving.min); + ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1, + pi->pcie_gen_performance.min, + pi->pcie_lane_performance.min); + ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2, + pi->pcie_gen_powersaving.min, + pi->pcie_lane_powersaving.max); + ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3, + pi->pcie_gen_performance.min, + pi->pcie_lane_performance.max); + ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4, + pi->pcie_gen_powersaving.max, + pi->pcie_lane_powersaving.max); + ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5, + pi->pcie_gen_performance.max, + pi->pcie_lane_performance.max); + + pi->dpm_table.pcie_speed_table.count = 6; + + return 0; +} + +static int ci_setup_default_dpm_tables(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table = + &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; + struct amdgpu_clock_voltage_dependency_table *allowed_mclk_table = + &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk; + struct amdgpu_cac_leakage_table *std_voltage_table = + &adev->pm.dpm.dyn_state.cac_leakage_table; + u32 i; + + if (allowed_sclk_vddc_table == NULL) + return -EINVAL; + if (allowed_sclk_vddc_table->count < 1) + return -EINVAL; + if (allowed_mclk_table == NULL) + return -EINVAL; + if (allowed_mclk_table->count < 1) + return -EINVAL; + + memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table)); + + ci_reset_single_dpm_table(adev, + &pi->dpm_table.sclk_table, 
+ SMU7_MAX_LEVELS_GRAPHICS); + ci_reset_single_dpm_table(adev, + &pi->dpm_table.mclk_table, + SMU7_MAX_LEVELS_MEMORY); + ci_reset_single_dpm_table(adev, + &pi->dpm_table.vddc_table, + SMU7_MAX_LEVELS_VDDC); + ci_reset_single_dpm_table(adev, + &pi->dpm_table.vddci_table, + SMU7_MAX_LEVELS_VDDCI); + ci_reset_single_dpm_table(adev, + &pi->dpm_table.mvdd_table, + SMU7_MAX_LEVELS_MVDD); + + pi->dpm_table.sclk_table.count = 0; + for (i = 0; i < allowed_sclk_vddc_table->count; i++) { + if ((i == 0) || + (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value != + allowed_sclk_vddc_table->entries[i].clk)) { + pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value = + allowed_sclk_vddc_table->entries[i].clk; + pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled = + (i == 0) ? true : false; + pi->dpm_table.sclk_table.count++; + } + } + + pi->dpm_table.mclk_table.count = 0; + for (i = 0; i < allowed_mclk_table->count; i++) { + if ((i == 0) || + (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value != + allowed_mclk_table->entries[i].clk)) { + pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value = + allowed_mclk_table->entries[i].clk; + pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled = + (i == 0) ? true : false; + pi->dpm_table.mclk_table.count++; + } + } + + for (i = 0; i < allowed_sclk_vddc_table->count; i++) { + pi->dpm_table.vddc_table.dpm_levels[i].value = + allowed_sclk_vddc_table->entries[i].v; + pi->dpm_table.vddc_table.dpm_levels[i].param1 = + std_voltage_table->entries[i].leakage; + pi->dpm_table.vddc_table.dpm_levels[i].enabled = true; + } + pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count; + + allowed_mclk_table = &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk; + if (allowed_mclk_table) { + for (i = 0; i < allowed_mclk_table->count; i++) { + pi->dpm_table.vddci_table.dpm_levels[i].value = + allowed_mclk_table->entries[i].v; + pi->dpm_table.vddci_table.dpm_levels[i].enabled = true; + } + pi->dpm_table.vddci_table.count = allowed_mclk_table->count; + } + + allowed_mclk_table = &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk; + if (allowed_mclk_table) { + for (i = 0; i < allowed_mclk_table->count; i++) { + pi->dpm_table.mvdd_table.dpm_levels[i].value = + allowed_mclk_table->entries[i].v; + pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true; + } + pi->dpm_table.mvdd_table.count = allowed_mclk_table->count; + } + + ci_setup_default_pcie_tables(adev); + + return 0; +} + +static int ci_find_boot_level(struct ci_single_dpm_table *table, + u32 value, u32 *boot_level) +{ + u32 i; + int ret = -EINVAL; + + for(i = 0; i < table->count; i++) { + if (value == table->dpm_levels[i].value) { + *boot_level = i; + ret = 0; + } + } + + return ret; +} + +static int ci_init_smc_table(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + struct ci_ulv_parm *ulv = &pi->ulv; + struct amdgpu_ps *amdgpu_boot_state = adev->pm.dpm.boot_ps; + SMU7_Discrete_DpmTable *table = &pi->smc_state_table; + int ret; + + ret = ci_setup_default_dpm_tables(adev); + if (ret) + return ret; + + if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) + ci_populate_smc_voltage_tables(adev, table); + + ci_init_fps_limits(adev); + + if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC) + table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; + + if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC) + table->SystemFlags |= 
PPSMC_SYSTEMFLAG_STEPVDDC; + + if (adev->mc.is_gddr5) + table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5; + + if (ulv->supported) { + ret = ci_populate_ulv_state(adev, &pi->smc_state_table.Ulv); + if (ret) + return ret; + WREG32_SMC(ixCG_ULV_PARAMETER, ulv->cg_ulv_parameter); + } + + ret = ci_populate_all_graphic_levels(adev); + if (ret) + return ret; + + ret = ci_populate_all_memory_levels(adev); + if (ret) + return ret; + + ci_populate_smc_link_level(adev, table); + + ret = ci_populate_smc_acpi_level(adev, table); + if (ret) + return ret; + + ret = ci_populate_smc_vce_level(adev, table); + if (ret) + return ret; + + ret = ci_populate_smc_acp_level(adev, table); + if (ret) + return ret; + + ret = ci_populate_smc_samu_level(adev, table); + if (ret) + return ret; + + ret = ci_do_program_memory_timing_parameters(adev); + if (ret) + return ret; + + ret = ci_populate_smc_uvd_level(adev, table); + if (ret) + return ret; + + table->UvdBootLevel = 0; + table->VceBootLevel = 0; + table->AcpBootLevel = 0; + table->SamuBootLevel = 0; + table->GraphicsBootLevel = 0; + table->MemoryBootLevel = 0; + + ret = ci_find_boot_level(&pi->dpm_table.sclk_table, + pi->vbios_boot_state.sclk_bootup_value, + (u32 *)&pi->smc_state_table.GraphicsBootLevel); + + ret = ci_find_boot_level(&pi->dpm_table.mclk_table, + pi->vbios_boot_state.mclk_bootup_value, + (u32 *)&pi->smc_state_table.MemoryBootLevel); + + table->BootVddc = pi->vbios_boot_state.vddc_bootup_value; + table->BootVddci = pi->vbios_boot_state.vddci_bootup_value; + table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value; + + ci_populate_smc_initial_state(adev, amdgpu_boot_state); + + ret = ci_populate_bapm_parameters_in_dpm_table(adev); + if (ret) + return ret; + + table->UVDInterval = 1; + table->VCEInterval = 1; + table->ACPInterval = 1; + table->SAMUInterval = 1; + table->GraphicsVoltageChangeEnable = 1; + table->GraphicsThermThrottleEnable = 1; + table->GraphicsInterval = 1; + table->VoltageInterval = 1; + table->ThermalInterval = 1; + table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high * + CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000); + table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low * + CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000); + table->MemoryVoltageChangeEnable = 1; + table->MemoryInterval = 1; + table->VoltageResponseTime = 0; + table->VddcVddciDelta = 4000; + table->PhaseResponseTime = 0; + table->MemoryThermThrottleEnable = 1; + table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1; + table->PCIeGenInterval = 1; + if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) + table->SVI2Enable = 1; + else + table->SVI2Enable = 0; + + table->ThermGpio = 17; + table->SclkStepSize = 0x4000; + + table->SystemFlags = cpu_to_be32(table->SystemFlags); + table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid); + table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase); + table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid); + table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid); + table->SclkStepSize = cpu_to_be32(table->SclkStepSize); + table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh); + table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow); + table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta); + table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime); + table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime); + table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE); + 
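+	/* the upload below starts at SystemFlags and its length excludes the
+	 * three SMU7_PIDController members of SMU7_Discrete_DpmTable */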
table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE); + table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE); + + ret = amdgpu_ci_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Discrete_DpmTable, SystemFlags), + (u8 *)&table->SystemFlags, + sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController), + pi->sram_end); + if (ret) + return ret; + + return 0; +} + +static void ci_trim_single_dpm_states(struct amdgpu_device *adev, + struct ci_single_dpm_table *dpm_table, + u32 low_limit, u32 high_limit) +{ + u32 i; + + for (i = 0; i < dpm_table->count; i++) { + if ((dpm_table->dpm_levels[i].value < low_limit) || + (dpm_table->dpm_levels[i].value > high_limit)) + dpm_table->dpm_levels[i].enabled = false; + else + dpm_table->dpm_levels[i].enabled = true; + } +} + +static void ci_trim_pcie_dpm_states(struct amdgpu_device *adev, + u32 speed_low, u32 lanes_low, + u32 speed_high, u32 lanes_high) +{ + struct ci_power_info *pi = ci_get_pi(adev); + struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table; + u32 i, j; + + for (i = 0; i < pcie_table->count; i++) { + if ((pcie_table->dpm_levels[i].value < speed_low) || + (pcie_table->dpm_levels[i].param1 < lanes_low) || + (pcie_table->dpm_levels[i].value > speed_high) || + (pcie_table->dpm_levels[i].param1 > lanes_high)) + pcie_table->dpm_levels[i].enabled = false; + else + pcie_table->dpm_levels[i].enabled = true; + } + + for (i = 0; i < pcie_table->count; i++) { + if (pcie_table->dpm_levels[i].enabled) { + for (j = i + 1; j < pcie_table->count; j++) { + if (pcie_table->dpm_levels[j].enabled) { + if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) && + (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1)) + pcie_table->dpm_levels[j].enabled = false; + } + } + } + } +} + +static int ci_trim_dpm_states(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_state) +{ + struct ci_ps *state = ci_get_ps(amdgpu_state); + struct ci_power_info *pi = ci_get_pi(adev); + u32 high_limit_count; + + if (state->performance_level_count < 1) + return -EINVAL; + + if (state->performance_level_count == 1) + high_limit_count = 0; + else + high_limit_count = 1; + + ci_trim_single_dpm_states(adev, + &pi->dpm_table.sclk_table, + state->performance_levels[0].sclk, + state->performance_levels[high_limit_count].sclk); + + ci_trim_single_dpm_states(adev, + &pi->dpm_table.mclk_table, + state->performance_levels[0].mclk, + state->performance_levels[high_limit_count].mclk); + + ci_trim_pcie_dpm_states(adev, + state->performance_levels[0].pcie_gen, + state->performance_levels[0].pcie_lane, + state->performance_levels[high_limit_count].pcie_gen, + state->performance_levels[high_limit_count].pcie_lane); + + return 0; +} + +static int ci_apply_disp_minimum_voltage_request(struct amdgpu_device *adev) +{ + struct amdgpu_clock_voltage_dependency_table *disp_voltage_table = + &adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk; + struct amdgpu_clock_voltage_dependency_table *vddc_table = + &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; + u32 requested_voltage = 0; + u32 i; + + if (disp_voltage_table == NULL) + return -EINVAL; + if (!disp_voltage_table->count) + return -EINVAL; + + for (i = 0; i < disp_voltage_table->count; i++) { + if (adev->clock.current_dispclk == disp_voltage_table->entries[i].clk) + requested_voltage = disp_voltage_table->entries[i].v; + } + + for (i = 0; i < vddc_table->count; i++) { + if (requested_voltage <= vddc_table->entries[i].v) { + requested_voltage = 
vddc_table->entries[i].v; + return (amdgpu_ci_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_VddC_Request, + requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ? + 0 : -EINVAL; + } + } + + return -EINVAL; +} + +static int ci_upload_dpm_level_enable_mask(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + PPSMC_Result result; + + ci_apply_disp_minimum_voltage_request(adev); + + if (!pi->sclk_dpm_key_disabled) { + if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) { + result = amdgpu_ci_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_SCLKDPM_SetEnabledMask, + pi->dpm_level_enable_mask.sclk_dpm_enable_mask); + if (result != PPSMC_Result_OK) + return -EINVAL; + } + } + + if (!pi->mclk_dpm_key_disabled) { + if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) { + result = amdgpu_ci_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_MCLKDPM_SetEnabledMask, + pi->dpm_level_enable_mask.mclk_dpm_enable_mask); + if (result != PPSMC_Result_OK) + return -EINVAL; + } + } + +#if 0 + if (!pi->pcie_dpm_key_disabled) { + if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) { + result = amdgpu_ci_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_PCIeDPM_SetEnabledMask, + pi->dpm_level_enable_mask.pcie_dpm_enable_mask); + if (result != PPSMC_Result_OK) + return -EINVAL; + } + } +#endif + + return 0; +} + +static void ci_find_dpm_states_clocks_in_dpm_table(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_state) +{ + struct ci_power_info *pi = ci_get_pi(adev); + struct ci_ps *state = ci_get_ps(amdgpu_state); + struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table; + u32 sclk = state->performance_levels[state->performance_level_count-1].sclk; + struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table; + u32 mclk = state->performance_levels[state->performance_level_count-1].mclk; + u32 i; + + pi->need_update_smu7_dpm_table = 0; + + for (i = 0; i < sclk_table->count; i++) { + if (sclk == sclk_table->dpm_levels[i].value) + break; + } + + if (i >= sclk_table->count) { + pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; + } else { + /* XXX check display min clock requirements */ + if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK) + pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK; + } + + for (i = 0; i < mclk_table->count; i++) { + if (mclk == mclk_table->dpm_levels[i].value) + break; + } + + if (i >= mclk_table->count) + pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; + + if (adev->pm.dpm.current_active_crtc_count != + adev->pm.dpm.new_active_crtc_count) + pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK; +} + +static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_state) +{ + struct ci_power_info *pi = ci_get_pi(adev); + struct ci_ps *state = ci_get_ps(amdgpu_state); + u32 sclk = state->performance_levels[state->performance_level_count-1].sclk; + u32 mclk = state->performance_levels[state->performance_level_count-1].mclk; + struct ci_dpm_table *dpm_table = &pi->dpm_table; + int ret; + + if (!pi->need_update_smu7_dpm_table) + return 0; + + if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) + dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk; + + if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) + dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk; + + if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) { + ret = 
ci_populate_all_graphic_levels(adev); + if (ret) + return ret; + } + + if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) { + ret = ci_populate_all_memory_levels(adev); + if (ret) + return ret; + } + + return 0; +} + +static int ci_enable_uvd_dpm(struct amdgpu_device *adev, bool enable) +{ + struct ci_power_info *pi = ci_get_pi(adev); + const struct amdgpu_clock_and_voltage_limits *max_limits; + int i; + + if (adev->pm.dpm.ac_power) + max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; + else + max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc; + + if (enable) { + pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0; + + for (i = adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) { + if (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) { + pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i; + + if (!pi->caps_uvd_dpm) + break; + } + } + + amdgpu_ci_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_UVDDPM_SetEnabledMask, + pi->dpm_level_enable_mask.uvd_dpm_enable_mask); + + if (pi->last_mclk_dpm_enable_mask & 0x1) { + pi->uvd_enabled = true; + pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE; + amdgpu_ci_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_MCLKDPM_SetEnabledMask, + pi->dpm_level_enable_mask.mclk_dpm_enable_mask); + } + } else { + if (pi->last_mclk_dpm_enable_mask & 0x1) { + pi->uvd_enabled = false; + pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1; + amdgpu_ci_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_MCLKDPM_SetEnabledMask, + pi->dpm_level_enable_mask.mclk_dpm_enable_mask); + } + } + + return (amdgpu_ci_send_msg_to_smc(adev, enable ? + PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ? + 0 : -EINVAL; +} + +static int ci_enable_vce_dpm(struct amdgpu_device *adev, bool enable) +{ + struct ci_power_info *pi = ci_get_pi(adev); + const struct amdgpu_clock_and_voltage_limits *max_limits; + int i; + + if (adev->pm.dpm.ac_power) + max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; + else + max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc; + + if (enable) { + pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0; + for (i = adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) { + if (adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) { + pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i; + + if (!pi->caps_vce_dpm) + break; + } + } + + amdgpu_ci_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_VCEDPM_SetEnabledMask, + pi->dpm_level_enable_mask.vce_dpm_enable_mask); + } + + return (amdgpu_ci_send_msg_to_smc(adev, enable ? + PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ? 
+ 0 : -EINVAL; +} + +#if 0 +static int ci_enable_samu_dpm(struct amdgpu_device *adev, bool enable) +{ + struct ci_power_info *pi = ci_get_pi(adev); + const struct amdgpu_clock_and_voltage_limits *max_limits; + int i; + + if (adev->pm.dpm.ac_power) + max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; + else + max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc; + + if (enable) { + pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0; + for (i = adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) { + if (adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) { + pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i; + + if (!pi->caps_samu_dpm) + break; + } + } + + amdgpu_ci_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_SAMUDPM_SetEnabledMask, + pi->dpm_level_enable_mask.samu_dpm_enable_mask); + } + return (amdgpu_ci_send_msg_to_smc(adev, enable ? + PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ? + 0 : -EINVAL; +} + +static int ci_enable_acp_dpm(struct amdgpu_device *adev, bool enable) +{ + struct ci_power_info *pi = ci_get_pi(adev); + const struct amdgpu_clock_and_voltage_limits *max_limits; + int i; + + if (adev->pm.dpm.ac_power) + max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; + else + max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc; + + if (enable) { + pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0; + for (i = adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) { + if (adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) { + pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i; + + if (!pi->caps_acp_dpm) + break; + } + } + + amdgpu_ci_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_ACPDPM_SetEnabledMask, + pi->dpm_level_enable_mask.acp_dpm_enable_mask); + } + + return (amdgpu_ci_send_msg_to_smc(adev, enable ? + PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ? + 0 : -EINVAL; +} +#endif + +static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate) +{ + struct ci_power_info *pi = ci_get_pi(adev); + u32 tmp; + + if (!gate) { + if (pi->caps_uvd_dpm || + (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0)) + pi->smc_state_table.UvdBootLevel = 0; + else + pi->smc_state_table.UvdBootLevel = + adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; + + tmp = RREG32_SMC(ixDPM_TABLE_475); + tmp &= ~DPM_TABLE_475__UvdBootLevel_MASK; + tmp |= (pi->smc_state_table.UvdBootLevel << DPM_TABLE_475__UvdBootLevel__SHIFT); + WREG32_SMC(ixDPM_TABLE_475, tmp); + } + + return ci_enable_uvd_dpm(adev, !gate); +} + +static u8 ci_get_vce_boot_level(struct amdgpu_device *adev) +{ + u8 i; + u32 min_evclk = 30000; /* ??? 
*/ + struct amdgpu_vce_clock_voltage_dependency_table *table = + &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; + + for (i = 0; i < table->count; i++) { + if (table->entries[i].evclk >= min_evclk) + return i; + } + + return table->count - 1; +} + +static int ci_update_vce_dpm(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_new_state, + struct amdgpu_ps *amdgpu_current_state) +{ + struct ci_power_info *pi = ci_get_pi(adev); + int ret = 0; + u32 tmp; + + if (amdgpu_current_state->evclk != amdgpu_new_state->evclk) { + if (amdgpu_new_state->evclk) { + /* turn the clocks on when encoding */ + ret = amdgpu_set_clockgating_state(adev, AMDGPU_IP_BLOCK_TYPE_VCE, + AMDGPU_CG_STATE_UNGATE); + if (ret) + return ret; + + pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(adev); + tmp = RREG32_SMC(ixDPM_TABLE_475); + tmp &= ~DPM_TABLE_475__VceBootLevel_MASK; + tmp |= (pi->smc_state_table.VceBootLevel << DPM_TABLE_475__VceBootLevel__SHIFT); + WREG32_SMC(ixDPM_TABLE_475, tmp); + + ret = ci_enable_vce_dpm(adev, true); + } else { + /* turn the clocks off when not encoding */ + ret = amdgpu_set_clockgating_state(adev, AMDGPU_IP_BLOCK_TYPE_VCE, + AMDGPU_CG_STATE_GATE); + if (ret) + return ret; + + ret = ci_enable_vce_dpm(adev, false); + } + } + return ret; +} + +#if 0 +static int ci_update_samu_dpm(struct amdgpu_device *adev, bool gate) +{ + return ci_enable_samu_dpm(adev, gate); +} + +static int ci_update_acp_dpm(struct amdgpu_device *adev, bool gate) +{ + struct ci_power_info *pi = ci_get_pi(adev); + u32 tmp; + + if (!gate) { + pi->smc_state_table.AcpBootLevel = 0; + + tmp = RREG32_SMC(ixDPM_TABLE_475); + tmp &= ~AcpBootLevel_MASK; + tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel); + WREG32_SMC(ixDPM_TABLE_475, tmp); + } + + return ci_enable_acp_dpm(adev, !gate); +} +#endif + +static int ci_generate_dpm_level_enable_mask(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_state) +{ + struct ci_power_info *pi = ci_get_pi(adev); + int ret; + + ret = ci_trim_dpm_states(adev, amdgpu_state); + if (ret) + return ret; + + pi->dpm_level_enable_mask.sclk_dpm_enable_mask = + ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table); + pi->dpm_level_enable_mask.mclk_dpm_enable_mask = + ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table); + pi->last_mclk_dpm_enable_mask = + pi->dpm_level_enable_mask.mclk_dpm_enable_mask; + if (pi->uvd_enabled) { + if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1) + pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE; + } + pi->dpm_level_enable_mask.pcie_dpm_enable_mask = + ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table); + + return 0; +} + +static u32 ci_get_lowest_enabled_level(struct amdgpu_device *adev, + u32 level_mask) +{ + u32 level = 0; + + while ((level_mask & (1 << level)) == 0) + level++; + + return level; +} + + +static int ci_dpm_force_performance_level(struct amdgpu_device *adev, + enum amdgpu_dpm_forced_level level) +{ + struct ci_power_info *pi = ci_get_pi(adev); + u32 tmp, levels, i; + int ret; + + if (level == AMDGPU_DPM_FORCED_LEVEL_HIGH) { + if ((!pi->pcie_dpm_key_disabled) && + pi->dpm_level_enable_mask.pcie_dpm_enable_mask) { + levels = 0; + tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask; + while (tmp >>= 1) + levels++; + if (levels) { + ret = ci_dpm_force_state_pcie(adev, level); + if (ret) + return ret; + for (i = 0; i < adev->usec_timeout; i++) { + tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) & + TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >> + 
TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT; + if (tmp == levels) + break; + udelay(1); + } + } + } + if ((!pi->sclk_dpm_key_disabled) && + pi->dpm_level_enable_mask.sclk_dpm_enable_mask) { + levels = 0; + tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask; + while (tmp >>= 1) + levels++; + if (levels) { + ret = ci_dpm_force_state_sclk(adev, levels); + if (ret) + return ret; + for (i = 0; i < adev->usec_timeout; i++) { + tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) & + TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >> + TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT; + if (tmp == levels) + break; + udelay(1); + } + } + } + if ((!pi->mclk_dpm_key_disabled) && + pi->dpm_level_enable_mask.mclk_dpm_enable_mask) { + levels = 0; + tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask; + while (tmp >>= 1) + levels++; + if (levels) { + ret = ci_dpm_force_state_mclk(adev, levels); + if (ret) + return ret; + for (i = 0; i < adev->usec_timeout; i++) { + tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) & + TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >> + TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT; + if (tmp == levels) + break; + udelay(1); + } + } + } + if ((!pi->pcie_dpm_key_disabled) && + pi->dpm_level_enable_mask.pcie_dpm_enable_mask) { + levels = 0; + tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask; + while (tmp >>= 1) + levels++; + if (levels) { + ret = ci_dpm_force_state_pcie(adev, level); + if (ret) + return ret; + for (i = 0; i < adev->usec_timeout; i++) { + tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) & + TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >> + TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT; + if (tmp == levels) + break; + udelay(1); + } + } + } + } else if (level == AMDGPU_DPM_FORCED_LEVEL_LOW) { + if ((!pi->sclk_dpm_key_disabled) && + pi->dpm_level_enable_mask.sclk_dpm_enable_mask) { + levels = ci_get_lowest_enabled_level(adev, + pi->dpm_level_enable_mask.sclk_dpm_enable_mask); + ret = ci_dpm_force_state_sclk(adev, levels); + if (ret) + return ret; + for (i = 0; i < adev->usec_timeout; i++) { + tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) & + TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >> + TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT; + if (tmp == levels) + break; + udelay(1); + } + } + if ((!pi->mclk_dpm_key_disabled) && + pi->dpm_level_enable_mask.mclk_dpm_enable_mask) { + levels = ci_get_lowest_enabled_level(adev, + pi->dpm_level_enable_mask.mclk_dpm_enable_mask); + ret = ci_dpm_force_state_mclk(adev, levels); + if (ret) + return ret; + for (i = 0; i < adev->usec_timeout; i++) { + tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) & + TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >> + TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT; + if (tmp == levels) + break; + udelay(1); + } + } + if ((!pi->pcie_dpm_key_disabled) && + pi->dpm_level_enable_mask.pcie_dpm_enable_mask) { + levels = ci_get_lowest_enabled_level(adev, + pi->dpm_level_enable_mask.pcie_dpm_enable_mask); + ret = ci_dpm_force_state_pcie(adev, levels); + if (ret) + return ret; + for (i = 0; i < adev->usec_timeout; i++) { + tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) & + TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >> + TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT; + if (tmp == levels) + break; + udelay(1); + } + } + } else if (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) { + if (!pi->pcie_dpm_key_disabled) { + 
PPSMC_Result smc_result; + + smc_result = amdgpu_ci_send_msg_to_smc(adev, + PPSMC_MSG_PCIeDPM_UnForceLevel); + if (smc_result != PPSMC_Result_OK) + return -EINVAL; + } + ret = ci_upload_dpm_level_enable_mask(adev); + if (ret) + return ret; + } + + adev->pm.dpm.forced_level = level; + + return 0; +} + +static int ci_set_mc_special_registers(struct amdgpu_device *adev, + struct ci_mc_reg_table *table) +{ + u8 i, j, k; + u32 temp_reg; + + for (i = 0, j = table->last; i < table->last; i++) { + if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) + return -EINVAL; + switch(table->mc_reg_address[i].s1) { + case mmMC_SEQ_MISC1: + temp_reg = RREG32(mmMC_PMG_CMD_EMRS); + table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS; + table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP; + for (k = 0; k < table->num_entries; k++) { + table->mc_reg_table_entry[k].mc_data[j] = + ((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16); + } + j++; + if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) + return -EINVAL; + + temp_reg = RREG32(mmMC_PMG_CMD_MRS); + table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS; + table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP; + for (k = 0; k < table->num_entries; k++) { + table->mc_reg_table_entry[k].mc_data[j] = + (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); + if (!adev->mc.is_gddr5) + table->mc_reg_table_entry[k].mc_data[j] |= 0x100; + } + j++; + if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) + return -EINVAL; + + if (!adev->mc.is_gddr5) { + table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD; + table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD; + for (k = 0; k < table->num_entries; k++) { + table->mc_reg_table_entry[k].mc_data[j] = + (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16; + } + j++; + if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) + return -EINVAL; + } + break; + case mmMC_SEQ_RESERVE_M: + temp_reg = RREG32(mmMC_PMG_CMD_MRS1); + table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1; + table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP; + for (k = 0; k < table->num_entries; k++) { + table->mc_reg_table_entry[k].mc_data[j] = + (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); + } + j++; + if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) + return -EINVAL; + break; + default: + break; + } + + } + + table->last = j; + + return 0; +} + +static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg) +{ + bool result = true; + + switch(in_reg) { + case mmMC_SEQ_RAS_TIMING: + *out_reg = mmMC_SEQ_RAS_TIMING_LP; + break; + case mmMC_SEQ_DLL_STBY: + *out_reg = mmMC_SEQ_DLL_STBY_LP; + break; + case mmMC_SEQ_G5PDX_CMD0: + *out_reg = mmMC_SEQ_G5PDX_CMD0_LP; + break; + case mmMC_SEQ_G5PDX_CMD1: + *out_reg = mmMC_SEQ_G5PDX_CMD1_LP; + break; + case mmMC_SEQ_G5PDX_CTRL: + *out_reg = mmMC_SEQ_G5PDX_CTRL_LP; + break; + case mmMC_SEQ_CAS_TIMING: + *out_reg = mmMC_SEQ_CAS_TIMING_LP; + break; + case mmMC_SEQ_MISC_TIMING: + *out_reg = mmMC_SEQ_MISC_TIMING_LP; + break; + case mmMC_SEQ_MISC_TIMING2: + *out_reg = mmMC_SEQ_MISC_TIMING2_LP; + break; + case mmMC_SEQ_PMG_DVS_CMD: + *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP; + break; + case mmMC_SEQ_PMG_DVS_CTL: + *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP; + break; + case mmMC_SEQ_RD_CTL_D0: + *out_reg = mmMC_SEQ_RD_CTL_D0_LP; + break; + case mmMC_SEQ_RD_CTL_D1: + *out_reg = mmMC_SEQ_RD_CTL_D1_LP; + break; + case mmMC_SEQ_WR_CTL_D0: + *out_reg = mmMC_SEQ_WR_CTL_D0_LP; + break; + case mmMC_SEQ_WR_CTL_D1: + *out_reg = mmMC_SEQ_WR_CTL_D1_LP; + break; + case 
mmMC_PMG_CMD_EMRS: + *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP; + break; + case mmMC_PMG_CMD_MRS: + *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP; + break; + case mmMC_PMG_CMD_MRS1: + *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP; + break; + case mmMC_SEQ_PMG_TIMING: + *out_reg = mmMC_SEQ_PMG_TIMING_LP; + break; + case mmMC_PMG_CMD_MRS2: + *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP; + break; + case mmMC_SEQ_WR_CTL_2: + *out_reg = mmMC_SEQ_WR_CTL_2_LP; + break; + default: + result = false; + break; + } + + return result; +} + +static void ci_set_valid_flag(struct ci_mc_reg_table *table) +{ + u8 i, j; + + for (i = 0; i < table->last; i++) { + for (j = 1; j < table->num_entries; j++) { + if (table->mc_reg_table_entry[j-1].mc_data[i] != + table->mc_reg_table_entry[j].mc_data[i]) { + table->valid_flag |= 1 << i; + break; + } + } + } +} + +static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table) +{ + u32 i; + u16 address; + + for (i = 0; i < table->last; i++) { + table->mc_reg_address[i].s0 = + ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ? + address : table->mc_reg_address[i].s1; + } +} + +static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table, + struct ci_mc_reg_table *ci_table) +{ + u8 i, j; + + if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) + return -EINVAL; + if (table->num_entries > MAX_AC_TIMING_ENTRIES) + return -EINVAL; + + for (i = 0; i < table->last; i++) + ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1; + + ci_table->last = table->last; + + for (i = 0; i < table->num_entries; i++) { + ci_table->mc_reg_table_entry[i].mclk_max = + table->mc_reg_table_entry[i].mclk_max; + for (j = 0; j < table->last; j++) + ci_table->mc_reg_table_entry[i].mc_data[j] = + table->mc_reg_table_entry[i].mc_data[j]; + } + ci_table->num_entries = table->num_entries; + + return 0; +} + +static int ci_register_patching_mc_seq(struct amdgpu_device *adev, + struct ci_mc_reg_table *table) +{ + u8 i, k; + u32 tmp; + bool patch; + + tmp = RREG32(mmMC_SEQ_MISC0); + patch = ((tmp & 0x0000f00) == 0x300) ? 
true : false; + + if (patch && + ((adev->pdev->device == 0x67B0) || + (adev->pdev->device == 0x67B1))) { + for (i = 0; i < table->last; i++) { + if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) + return -EINVAL; + switch (table->mc_reg_address[i].s1) { + case mmMC_SEQ_MISC1: + for (k = 0; k < table->num_entries; k++) { + if ((table->mc_reg_table_entry[k].mclk_max == 125000) || + (table->mc_reg_table_entry[k].mclk_max == 137500)) + table->mc_reg_table_entry[k].mc_data[i] = + (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) | + 0x00000007; + } + break; + case mmMC_SEQ_WR_CTL_D0: + for (k = 0; k < table->num_entries; k++) { + if ((table->mc_reg_table_entry[k].mclk_max == 125000) || + (table->mc_reg_table_entry[k].mclk_max == 137500)) + table->mc_reg_table_entry[k].mc_data[i] = + (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) | + 0x0000D0DD; + } + break; + case mmMC_SEQ_WR_CTL_D1: + for (k = 0; k < table->num_entries; k++) { + if ((table->mc_reg_table_entry[k].mclk_max == 125000) || + (table->mc_reg_table_entry[k].mclk_max == 137500)) + table->mc_reg_table_entry[k].mc_data[i] = + (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) | + 0x0000D0DD; + } + break; + case mmMC_SEQ_WR_CTL_2: + for (k = 0; k < table->num_entries; k++) { + if ((table->mc_reg_table_entry[k].mclk_max == 125000) || + (table->mc_reg_table_entry[k].mclk_max == 137500)) + table->mc_reg_table_entry[k].mc_data[i] = 0; + } + break; + case mmMC_SEQ_CAS_TIMING: + for (k = 0; k < table->num_entries; k++) { + if (table->mc_reg_table_entry[k].mclk_max == 125000) + table->mc_reg_table_entry[k].mc_data[i] = + (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) | + 0x000C0140; + else if (table->mc_reg_table_entry[k].mclk_max == 137500) + table->mc_reg_table_entry[k].mc_data[i] = + (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) | + 0x000C0150; + } + break; + case mmMC_SEQ_MISC_TIMING: + for (k = 0; k < table->num_entries; k++) { + if (table->mc_reg_table_entry[k].mclk_max == 125000) + table->mc_reg_table_entry[k].mc_data[i] = + (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) | + 0x00000030; + else if (table->mc_reg_table_entry[k].mclk_max == 137500) + table->mc_reg_table_entry[k].mc_data[i] = + (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) | + 0x00000035; + } + break; + default: + break; + } + } + + WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3); + tmp = RREG32(mmMC_SEQ_IO_DEBUG_DATA); + tmp = (tmp & 0xFFF8FFFF) | (1 << 16); + WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3); + WREG32(mmMC_SEQ_IO_DEBUG_DATA, tmp); + } + + return 0; +} + +static int ci_initialize_mc_reg_table(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + struct atom_mc_reg_table *table; + struct ci_mc_reg_table *ci_table = &pi->mc_reg_table; + u8 module_index = ci_get_memory_module_index(adev); + int ret; + + table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL); + if (!table) + return -ENOMEM; + + WREG32(mmMC_SEQ_RAS_TIMING_LP, RREG32(mmMC_SEQ_RAS_TIMING)); + WREG32(mmMC_SEQ_CAS_TIMING_LP, RREG32(mmMC_SEQ_CAS_TIMING)); + WREG32(mmMC_SEQ_DLL_STBY_LP, RREG32(mmMC_SEQ_DLL_STBY)); + WREG32(mmMC_SEQ_G5PDX_CMD0_LP, RREG32(mmMC_SEQ_G5PDX_CMD0)); + WREG32(mmMC_SEQ_G5PDX_CMD1_LP, RREG32(mmMC_SEQ_G5PDX_CMD1)); + WREG32(mmMC_SEQ_G5PDX_CTRL_LP, RREG32(mmMC_SEQ_G5PDX_CTRL)); + WREG32(mmMC_SEQ_PMG_DVS_CMD_LP, RREG32(mmMC_SEQ_PMG_DVS_CMD)); + WREG32(mmMC_SEQ_PMG_DVS_CTL_LP, RREG32(mmMC_SEQ_PMG_DVS_CTL)); + WREG32(mmMC_SEQ_MISC_TIMING_LP, RREG32(mmMC_SEQ_MISC_TIMING)); + WREG32(mmMC_SEQ_MISC_TIMING2_LP, 
RREG32(mmMC_SEQ_MISC_TIMING2)); + WREG32(mmMC_SEQ_PMG_CMD_EMRS_LP, RREG32(mmMC_PMG_CMD_EMRS)); + WREG32(mmMC_SEQ_PMG_CMD_MRS_LP, RREG32(mmMC_PMG_CMD_MRS)); + WREG32(mmMC_SEQ_PMG_CMD_MRS1_LP, RREG32(mmMC_PMG_CMD_MRS1)); + WREG32(mmMC_SEQ_WR_CTL_D0_LP, RREG32(mmMC_SEQ_WR_CTL_D0)); + WREG32(mmMC_SEQ_WR_CTL_D1_LP, RREG32(mmMC_SEQ_WR_CTL_D1)); + WREG32(mmMC_SEQ_RD_CTL_D0_LP, RREG32(mmMC_SEQ_RD_CTL_D0)); + WREG32(mmMC_SEQ_RD_CTL_D1_LP, RREG32(mmMC_SEQ_RD_CTL_D1)); + WREG32(mmMC_SEQ_PMG_TIMING_LP, RREG32(mmMC_SEQ_PMG_TIMING)); + WREG32(mmMC_SEQ_PMG_CMD_MRS2_LP, RREG32(mmMC_PMG_CMD_MRS2)); + WREG32(mmMC_SEQ_WR_CTL_2_LP, RREG32(mmMC_SEQ_WR_CTL_2)); + + ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table); + if (ret) + goto init_mc_done; + + ret = ci_copy_vbios_mc_reg_table(table, ci_table); + if (ret) + goto init_mc_done; + + ci_set_s0_mc_reg_index(ci_table); + + ret = ci_register_patching_mc_seq(adev, ci_table); + if (ret) + goto init_mc_done; + + ret = ci_set_mc_special_registers(adev, ci_table); + if (ret) + goto init_mc_done; + + ci_set_valid_flag(ci_table); + +init_mc_done: + kfree(table); + + return ret; +} + +static int ci_populate_mc_reg_addresses(struct amdgpu_device *adev, + SMU7_Discrete_MCRegisters *mc_reg_table) +{ + struct ci_power_info *pi = ci_get_pi(adev); + u32 i, j; + + for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) { + if (pi->mc_reg_table.valid_flag & (1 << j)) { + if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) + return -EINVAL; + mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0); + mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1); + i++; + } + } + + mc_reg_table->last = (u8)i; + + return 0; +} + +static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry, + SMU7_Discrete_MCRegisterSet *data, + u32 num_entries, u32 valid_flag) +{ + u32 i, j; + + for (i = 0, j = 0; j < num_entries; j++) { + if (valid_flag & (1 << j)) { + data->value[i] = cpu_to_be32(entry->mc_data[j]); + i++; + } + } +} + +static void ci_convert_mc_reg_table_entry_to_smc(struct amdgpu_device *adev, + const u32 memory_clock, + SMU7_Discrete_MCRegisterSet *mc_reg_table_data) +{ + struct ci_power_info *pi = ci_get_pi(adev); + u32 i = 0; + + for(i = 0; i < pi->mc_reg_table.num_entries; i++) { + if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max) + break; + } + + if ((i == pi->mc_reg_table.num_entries) && (i > 0)) + --i; + + ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i], + mc_reg_table_data, pi->mc_reg_table.last, + pi->mc_reg_table.valid_flag); +} + +static void ci_convert_mc_reg_table_to_smc(struct amdgpu_device *adev, + SMU7_Discrete_MCRegisters *mc_reg_table) +{ + struct ci_power_info *pi = ci_get_pi(adev); + u32 i; + + for (i = 0; i < pi->dpm_table.mclk_table.count; i++) + ci_convert_mc_reg_table_entry_to_smc(adev, + pi->dpm_table.mclk_table.dpm_levels[i].value, + &mc_reg_table->data[i]); +} + +static int ci_populate_initial_mc_reg_table(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + int ret; + + memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters)); + + ret = ci_populate_mc_reg_addresses(adev, &pi->smc_mc_reg_table); + if (ret) + return ret; + ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table); + + return amdgpu_ci_copy_bytes_to_smc(adev, + pi->mc_reg_table_start, + (u8 *)&pi->smc_mc_reg_table, + sizeof(SMU7_Discrete_MCRegisters), + pi->sram_end); +} + +static int ci_update_and_upload_mc_reg_table(struct amdgpu_device *adev) +{ + 
	struct ci_power_info *pi = ci_get_pi(adev);
+
+	if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
+		return 0;
+
+	memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
+
+	ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);
+
+	return amdgpu_ci_copy_bytes_to_smc(adev,
+				pi->mc_reg_table_start +
+				offsetof(SMU7_Discrete_MCRegisters, data[0]),
+				(u8 *)&pi->smc_mc_reg_table.data[0],
+				sizeof(SMU7_Discrete_MCRegisterSet) *
+				pi->dpm_table.mclk_table.count,
+				pi->sram_end);
+}
+
+static void ci_enable_voltage_control(struct amdgpu_device *adev)
+{
+	u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
+
+	tmp |= GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK;
+	WREG32_SMC(ixGENERAL_PWRMGT, tmp);
+}
+
+static enum amdgpu_pcie_gen ci_get_maximum_link_speed(struct amdgpu_device *adev,
+						       struct amdgpu_ps *amdgpu_state)
+{
+	struct ci_ps *state = ci_get_ps(amdgpu_state);
+	int i;
+	u16 pcie_speed, max_speed = 0;
+
+	for (i = 0; i < state->performance_level_count; i++) {
+		pcie_speed = state->performance_levels[i].pcie_gen;
+		if (max_speed < pcie_speed)
+			max_speed = pcie_speed;
+	}
+
+	return max_speed;
+}
+
+static u16 ci_get_current_pcie_speed(struct amdgpu_device *adev)
+{
+	u32 speed_cntl = 0;
+
+	speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL) &
+		PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK;
+	speed_cntl >>= PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
+
+	return (u16)speed_cntl;
+}
+
+static int ci_get_current_pcie_lane_number(struct amdgpu_device *adev)
+{
+	u32 link_width = 0;
+
+	link_width = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL) &
+		PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK;
+	link_width >>= PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
+
+	switch (link_width) {
+	case 1:
+		return 1;
+	case 2:
+		return 2;
+	case 3:
+		return 4;
+	case 4:
+		return 8;
+	case 0:
+	case 6:
+	default:
+		return 16;
+	}
+}
+
+static void ci_request_link_speed_change_before_state_change(struct amdgpu_device *adev,
+							      struct amdgpu_ps *amdgpu_new_state,
+							      struct amdgpu_ps *amdgpu_current_state)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	enum amdgpu_pcie_gen target_link_speed =
+		ci_get_maximum_link_speed(adev, amdgpu_new_state);
+	enum amdgpu_pcie_gen current_link_speed;
+
+	if (pi->force_pcie_gen == AMDGPU_PCIE_GEN_INVALID)
+		current_link_speed = ci_get_maximum_link_speed(adev, amdgpu_current_state);
+	else
+		current_link_speed = pi->force_pcie_gen;
+
+	pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
+	pi->pspp_notify_required = false;
+	if (target_link_speed > current_link_speed) {
+		switch (target_link_speed) {
+#ifdef CONFIG_ACPI
+		case AMDGPU_PCIE_GEN3:
+			if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
+				break;
+			pi->force_pcie_gen = AMDGPU_PCIE_GEN2;
+			if (current_link_speed == AMDGPU_PCIE_GEN2)
+				break;
+		case AMDGPU_PCIE_GEN2:
+			if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
+				break;
+#endif
+		default:
+			pi->force_pcie_gen = ci_get_current_pcie_speed(adev);
+			break;
+		}
+	} else {
+		if (target_link_speed < current_link_speed)
+			pi->pspp_notify_required = true;
+	}
+}
+
+static void ci_notify_link_speed_change_after_state_change(struct amdgpu_device *adev,
+							    struct amdgpu_ps *amdgpu_new_state,
+							    struct amdgpu_ps *amdgpu_current_state)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	enum amdgpu_pcie_gen target_link_speed =
+		ci_get_maximum_link_speed(adev, amdgpu_new_state);
+	u8 request;
+
+	if (pi->pspp_notify_required) {
+		if (target_link_speed == AMDGPU_PCIE_GEN3)
+			request =
PCIE_PERF_REQ_PECI_GEN3; + else if (target_link_speed == AMDGPU_PCIE_GEN2) + request = PCIE_PERF_REQ_PECI_GEN2; + else + request = PCIE_PERF_REQ_PECI_GEN1; + + if ((request == PCIE_PERF_REQ_PECI_GEN1) && + (ci_get_current_pcie_speed(adev) > 0)) + return; + +#ifdef CONFIG_ACPI + amdgpu_acpi_pcie_performance_request(adev, request, false); +#endif + } +} + +static int ci_set_private_data_variables_based_on_pptable(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table = + &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; + struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddc_table = + &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk; + struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddci_table = + &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk; + + if (allowed_sclk_vddc_table == NULL) + return -EINVAL; + if (allowed_sclk_vddc_table->count < 1) + return -EINVAL; + if (allowed_mclk_vddc_table == NULL) + return -EINVAL; + if (allowed_mclk_vddc_table->count < 1) + return -EINVAL; + if (allowed_mclk_vddci_table == NULL) + return -EINVAL; + if (allowed_mclk_vddci_table->count < 1) + return -EINVAL; + + pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v; + pi->max_vddc_in_pp_table = + allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v; + + pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v; + pi->max_vddci_in_pp_table = + allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v; + + adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = + allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk; + adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = + allowed_mclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk; + adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = + allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v; + adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = + allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v; + + return 0; +} + +static void ci_patch_with_vddc_leakage(struct amdgpu_device *adev, u16 *vddc) +{ + struct ci_power_info *pi = ci_get_pi(adev); + struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage; + u32 leakage_index; + + for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) { + if (leakage_table->leakage_id[leakage_index] == *vddc) { + *vddc = leakage_table->actual_voltage[leakage_index]; + break; + } + } +} + +static void ci_patch_with_vddci_leakage(struct amdgpu_device *adev, u16 *vddci) +{ + struct ci_power_info *pi = ci_get_pi(adev); + struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage; + u32 leakage_index; + + for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) { + if (leakage_table->leakage_id[leakage_index] == *vddci) { + *vddci = leakage_table->actual_voltage[leakage_index]; + break; + } + } +} + +static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev, + struct amdgpu_clock_voltage_dependency_table *table) +{ + u32 i; + + if (table) { + for (i = 0; i < table->count; i++) + ci_patch_with_vddc_leakage(adev, &table->entries[i].v); + } +} + +static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct amdgpu_device *adev, + struct amdgpu_clock_voltage_dependency_table *table) +{ + u32 i; + + if (table) { + for (i = 0; i < table->count; i++) + ci_patch_with_vddci_leakage(adev, 
&table->entries[i].v); + } +} + +static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev, + struct amdgpu_vce_clock_voltage_dependency_table *table) +{ + u32 i; + + if (table) { + for (i = 0; i < table->count; i++) + ci_patch_with_vddc_leakage(adev, &table->entries[i].v); + } +} + +static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev, + struct amdgpu_uvd_clock_voltage_dependency_table *table) +{ + u32 i; + + if (table) { + for (i = 0; i < table->count; i++) + ci_patch_with_vddc_leakage(adev, &table->entries[i].v); + } +} + +static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct amdgpu_device *adev, + struct amdgpu_phase_shedding_limits_table *table) +{ + u32 i; + + if (table) { + for (i = 0; i < table->count; i++) + ci_patch_with_vddc_leakage(adev, &table->entries[i].voltage); + } +} + +static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct amdgpu_device *adev, + struct amdgpu_clock_and_voltage_limits *table) +{ + if (table) { + ci_patch_with_vddc_leakage(adev, (u16 *)&table->vddc); + ci_patch_with_vddci_leakage(adev, (u16 *)&table->vddci); + } +} + +static void ci_patch_cac_leakage_table_with_vddc_leakage(struct amdgpu_device *adev, + struct amdgpu_cac_leakage_table *table) +{ + u32 i; + + if (table) { + for (i = 0; i < table->count; i++) + ci_patch_with_vddc_leakage(adev, &table->entries[i].vddc); + } +} + +static void ci_patch_dependency_tables_with_leakage(struct amdgpu_device *adev) +{ + + ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev, + &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk); + ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev, + &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk); + ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev, + &adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk); + ci_patch_clock_voltage_dependency_table_with_vddci_leakage(adev, + &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk); + ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(adev, + &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table); + ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(adev, + &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table); + ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev, + &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table); + ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev, + &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table); + ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(adev, + &adev->pm.dpm.dyn_state.phase_shedding_limits_table); + ci_patch_clock_voltage_limits_with_vddc_leakage(adev, + &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac); + ci_patch_clock_voltage_limits_with_vddc_leakage(adev, + &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc); + ci_patch_cac_leakage_table_with_vddc_leakage(adev, + &adev->pm.dpm.dyn_state.cac_leakage_table); + +} + +static void ci_update_current_ps(struct amdgpu_device *adev, + struct amdgpu_ps *rps) +{ + struct ci_ps *new_ps = ci_get_ps(rps); + struct ci_power_info *pi = ci_get_pi(adev); + + pi->current_rps = *rps; + pi->current_ps = *new_ps; + pi->current_rps.ps_priv = &pi->current_ps; +} + +static void ci_update_requested_ps(struct amdgpu_device *adev, + struct amdgpu_ps *rps) +{ + struct ci_ps *new_ps = ci_get_ps(rps); + struct ci_power_info *pi = ci_get_pi(adev); + + pi->requested_rps = *rps; + pi->requested_ps = *new_ps; + pi->requested_rps.ps_priv = &pi->requested_ps; 
+}
+
+static int ci_dpm_pre_set_power_state(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
+	struct amdgpu_ps *new_ps = &requested_ps;
+
+	ci_update_requested_ps(adev, new_ps);
+
+	ci_apply_state_adjust_rules(adev, &pi->requested_rps);
+
+	return 0;
+}
+
+static void ci_dpm_post_set_power_state(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	struct amdgpu_ps *new_ps = &pi->requested_rps;
+
+	ci_update_current_ps(adev, new_ps);
+}
+
+
+static void ci_dpm_setup_asic(struct amdgpu_device *adev)
+{
+	ci_read_clock_registers(adev);
+	ci_enable_acpi_power_management(adev);
+	ci_init_sclk_t(adev);
+}
+
+static int ci_dpm_enable(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
+	int ret;
+
+	if (amdgpu_ci_is_smc_running(adev))
+		return -EINVAL;
+	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
+		ci_enable_voltage_control(adev);
+		ret = ci_construct_voltage_tables(adev);
+		if (ret) {
+			DRM_ERROR("ci_construct_voltage_tables failed\n");
+			return ret;
+		}
+	}
+	if (pi->caps_dynamic_ac_timing) {
+		ret = ci_initialize_mc_reg_table(adev);
+		if (ret)
+			pi->caps_dynamic_ac_timing = false;
+	}
+	if (pi->dynamic_ss)
+		ci_enable_spread_spectrum(adev, true);
+	if (pi->thermal_protection)
+		ci_enable_thermal_protection(adev, true);
+	ci_program_sstp(adev);
+	ci_enable_display_gap(adev);
+	ci_program_vc(adev);
+	ret = ci_upload_firmware(adev);
+	if (ret) {
+		DRM_ERROR("ci_upload_firmware failed\n");
+		return ret;
+	}
+	ret = ci_process_firmware_header(adev);
+	if (ret) {
+		DRM_ERROR("ci_process_firmware_header failed\n");
+		return ret;
+	}
+	ret = ci_initial_switch_from_arb_f0_to_f1(adev);
+	if (ret) {
+		DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
+		return ret;
+	}
+	ret = ci_init_smc_table(adev);
+	if (ret) {
+		DRM_ERROR("ci_init_smc_table failed\n");
+		return ret;
+	}
+	ret = ci_init_arb_table_index(adev);
+	if (ret) {
+		DRM_ERROR("ci_init_arb_table_index failed\n");
+		return ret;
+	}
+	if (pi->caps_dynamic_ac_timing) {
+		ret = ci_populate_initial_mc_reg_table(adev);
+		if (ret) {
+			DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
+			return ret;
+		}
+	}
+	ret = ci_populate_pm_base(adev);
+	if (ret) {
+		DRM_ERROR("ci_populate_pm_base failed\n");
+		return ret;
+	}
+	ci_dpm_start_smc(adev);
+	ci_enable_vr_hot_gpio_interrupt(adev);
+	ret = ci_notify_smc_display_change(adev, false);
+	if (ret) {
+		DRM_ERROR("ci_notify_smc_display_change failed\n");
+		return ret;
+	}
+	ci_enable_sclk_control(adev, true);
+	ret = ci_enable_ulv(adev, true);
+	if (ret) {
+		DRM_ERROR("ci_enable_ulv failed\n");
+		return ret;
+	}
+	ret = ci_enable_ds_master_switch(adev, true);
+	if (ret) {
+		DRM_ERROR("ci_enable_ds_master_switch failed\n");
+		return ret;
+	}
+	ret = ci_start_dpm(adev);
+	if (ret) {
+		DRM_ERROR("ci_start_dpm failed\n");
+		return ret;
+	}
+	ret = ci_enable_didt(adev, true);
+	if (ret) {
+		DRM_ERROR("ci_enable_didt failed\n");
+		return ret;
+	}
+	ret = ci_enable_smc_cac(adev, true);
+	if (ret) {
+		DRM_ERROR("ci_enable_smc_cac failed\n");
+		return ret;
+	}
+	ret = ci_enable_power_containment(adev, true);
+	if (ret) {
+		DRM_ERROR("ci_enable_power_containment failed\n");
+		return ret;
+	}
+
+	ret = ci_power_control_set_level(adev);
+	if (ret) {
+		DRM_ERROR("ci_power_control_set_level failed\n");
+		return ret;
+	}
+
+	ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL,
true); + + ret = ci_enable_thermal_based_sclk_dpm(adev, true); + if (ret) { + DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n"); + return ret; + } + + ci_thermal_start_thermal_controller(adev); + + ci_update_current_ps(adev, boot_ps); + + if (adev->irq.installed && + amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) { +#if 0 + PPSMC_Result result; +#endif + ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN, + CISLANDS_TEMP_RANGE_MAX); + if (ret) { + DRM_ERROR("ci_thermal_set_temperature_range failed\n"); + return ret; + } + amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq, + AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); + amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq, + AMDGPU_THERMAL_IRQ_HIGH_TO_LOW); + +#if 0 + result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt); + + if (result != PPSMC_Result_OK) + DRM_DEBUG_KMS("Could not enable thermal interrupts.\n"); +#endif + } + + return 0; +} + +static void ci_dpm_disable(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps; + + amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, + AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); + amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, + AMDGPU_THERMAL_IRQ_HIGH_TO_LOW); + + ci_dpm_powergate_uvd(adev, false); + + if (!amdgpu_ci_is_smc_running(adev)) + return; + + ci_thermal_stop_thermal_controller(adev); + + if (pi->thermal_protection) + ci_enable_thermal_protection(adev, false); + ci_enable_power_containment(adev, false); + ci_enable_smc_cac(adev, false); + ci_enable_didt(adev, false); + ci_enable_spread_spectrum(adev, false); + ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, false); + ci_stop_dpm(adev); + ci_enable_ds_master_switch(adev, false); + ci_enable_ulv(adev, false); + ci_clear_vc(adev); + ci_reset_to_default(adev); + ci_dpm_stop_smc(adev); + ci_force_switch_to_arb_f0(adev); + ci_enable_thermal_based_sclk_dpm(adev, false); + + ci_update_current_ps(adev, boot_ps); +} + +static int ci_dpm_set_power_state(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + struct amdgpu_ps *new_ps = &pi->requested_rps; + struct amdgpu_ps *old_ps = &pi->current_rps; + int ret; + + ci_find_dpm_states_clocks_in_dpm_table(adev, new_ps); + if (pi->pcie_performance_request) + ci_request_link_speed_change_before_state_change(adev, new_ps, old_ps); + ret = ci_freeze_sclk_mclk_dpm(adev); + if (ret) { + DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n"); + return ret; + } + ret = ci_populate_and_upload_sclk_mclk_dpm_levels(adev, new_ps); + if (ret) { + DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n"); + return ret; + } + ret = ci_generate_dpm_level_enable_mask(adev, new_ps); + if (ret) { + DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n"); + return ret; + } + + ret = ci_update_vce_dpm(adev, new_ps, old_ps); + if (ret) { + DRM_ERROR("ci_update_vce_dpm failed\n"); + return ret; + } + + ret = ci_update_sclk_t(adev); + if (ret) { + DRM_ERROR("ci_update_sclk_t failed\n"); + return ret; + } + if (pi->caps_dynamic_ac_timing) { + ret = ci_update_and_upload_mc_reg_table(adev); + if (ret) { + DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n"); + return ret; + } + } + ret = ci_program_memory_timing_parameters(adev); + if (ret) { + DRM_ERROR("ci_program_memory_timing_parameters failed\n"); + return ret; + } + ret = ci_unfreeze_sclk_mclk_dpm(adev); + if (ret) { + DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n"); + return ret; + } + ret = 
ci_upload_dpm_level_enable_mask(adev); + if (ret) { + DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n"); + return ret; + } + if (pi->pcie_performance_request) + ci_notify_link_speed_change_after_state_change(adev, new_ps, old_ps); + + return 0; +} + +#if 0 +static void ci_dpm_reset_asic(struct amdgpu_device *adev) +{ + ci_set_boot_state(adev); +} +#endif + +static void ci_dpm_display_configuration_changed(struct amdgpu_device *adev) +{ + ci_program_display_gap(adev); +} + +union power_info { + struct _ATOM_POWERPLAY_INFO info; + struct _ATOM_POWERPLAY_INFO_V2 info_2; + struct _ATOM_POWERPLAY_INFO_V3 info_3; + struct _ATOM_PPLIB_POWERPLAYTABLE pplib; + struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; + struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; +}; + +union pplib_clock_info { + struct _ATOM_PPLIB_R600_CLOCK_INFO r600; + struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780; + struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen; + struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo; + struct _ATOM_PPLIB_SI_CLOCK_INFO si; + struct _ATOM_PPLIB_CI_CLOCK_INFO ci; +}; + +union pplib_power_state { + struct _ATOM_PPLIB_STATE v1; + struct _ATOM_PPLIB_STATE_V2 v2; +}; + +static void ci_parse_pplib_non_clock_info(struct amdgpu_device *adev, + struct amdgpu_ps *rps, + struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info, + u8 table_rev) +{ + rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings); + rps->class = le16_to_cpu(non_clock_info->usClassification); + rps->class2 = le16_to_cpu(non_clock_info->usClassification2); + + if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { + rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); + rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); + } else { + rps->vclk = 0; + rps->dclk = 0; + } + + if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) + adev->pm.dpm.boot_ps = rps; + if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) + adev->pm.dpm.uvd_ps = rps; +} + +static void ci_parse_pplib_clock_info(struct amdgpu_device *adev, + struct amdgpu_ps *rps, int index, + union pplib_clock_info *clock_info) +{ + struct ci_power_info *pi = ci_get_pi(adev); + struct ci_ps *ps = ci_get_ps(rps); + struct ci_pl *pl = &ps->performance_levels[index]; + + ps->performance_level_count = index + 1; + + pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow); + pl->sclk |= clock_info->ci.ucEngineClockHigh << 16; + pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow); + pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16; + + pl->pcie_gen = amdgpu_get_pcie_gen_support(adev, + pi->sys_pcie_mask, + pi->vbios_boot_state.pcie_gen_bootup_value, + clock_info->ci.ucPCIEGen); + pl->pcie_lane = amdgpu_get_pcie_lane_support(adev, + pi->vbios_boot_state.pcie_lane_bootup_value, + le16_to_cpu(clock_info->ci.usPCIELane)); + + if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) { + pi->acpi_pcie_gen = pl->pcie_gen; + } + + if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) { + pi->ulv.supported = true; + pi->ulv.pl = *pl; + pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT; + } + + /* patch up boot state */ + if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) { + pl->mclk = pi->vbios_boot_state.mclk_bootup_value; + pl->sclk = pi->vbios_boot_state.sclk_bootup_value; + pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value; + pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value; + } + + switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) { + case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY: + pi->use_pcie_powersaving_levels = true; + if (pi->pcie_gen_powersaving.max < pl->pcie_gen) + pi->pcie_gen_powersaving.max = pl->pcie_gen; + 
if (pi->pcie_gen_powersaving.min > pl->pcie_gen) + pi->pcie_gen_powersaving.min = pl->pcie_gen; + if (pi->pcie_lane_powersaving.max < pl->pcie_lane) + pi->pcie_lane_powersaving.max = pl->pcie_lane; + if (pi->pcie_lane_powersaving.min > pl->pcie_lane) + pi->pcie_lane_powersaving.min = pl->pcie_lane; + break; + case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE: + pi->use_pcie_performance_levels = true; + if (pi->pcie_gen_performance.max < pl->pcie_gen) + pi->pcie_gen_performance.max = pl->pcie_gen; + if (pi->pcie_gen_performance.min > pl->pcie_gen) + pi->pcie_gen_performance.min = pl->pcie_gen; + if (pi->pcie_lane_performance.max < pl->pcie_lane) + pi->pcie_lane_performance.max = pl->pcie_lane; + if (pi->pcie_lane_performance.min > pl->pcie_lane) + pi->pcie_lane_performance.min = pl->pcie_lane; + break; + default: + break; + } +} + +static int ci_parse_power_table(struct amdgpu_device *adev) +{ + struct amdgpu_mode_info *mode_info = &adev->mode_info; + struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; + union pplib_power_state *power_state; + int i, j, k, non_clock_array_index, clock_array_index; + union pplib_clock_info *clock_info; + struct _StateArray *state_array; + struct _ClockInfoArray *clock_info_array; + struct _NonClockInfoArray *non_clock_info_array; + union power_info *power_info; + int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); + u16 data_offset; + u8 frev, crev; + u8 *power_state_offset; + struct ci_ps *ps; + + if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) + return -EINVAL; + power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); + + amdgpu_add_thermal_controller(adev); + + state_array = (struct _StateArray *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib.usStateArrayOffset)); + clock_info_array = (struct _ClockInfoArray *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib.usClockInfoArrayOffset)); + non_clock_info_array = (struct _NonClockInfoArray *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); + + adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) * + state_array->ucNumEntries, GFP_KERNEL); + if (!adev->pm.dpm.ps) + return -ENOMEM; + power_state_offset = (u8 *)state_array->states; + for (i = 0; i < state_array->ucNumEntries; i++) { + u8 *idx; + power_state = (union pplib_power_state *)power_state_offset; + non_clock_array_index = power_state->v2.nonClockInfoIndex; + non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) + &non_clock_info_array->nonClockInfo[non_clock_array_index]; + ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL); + if (ps == NULL) { + kfree(adev->pm.dpm.ps); + return -ENOMEM; + } + adev->pm.dpm.ps[i].ps_priv = ps; + ci_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i], + non_clock_info, + non_clock_info_array->ucEntrySize); + k = 0; + idx = (u8 *)&power_state->v2.clockInfoIndex[0]; + for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { + clock_array_index = idx[j]; + if (clock_array_index >= clock_info_array->ucNumEntries) + continue; + if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS) + break; + clock_info = (union pplib_clock_info *) + ((u8 *)&clock_info_array->clockInfo[0] + + (clock_array_index * clock_info_array->ucEntrySize)); + ci_parse_pplib_clock_info(adev, + &adev->pm.dpm.ps[i], k, + clock_info); + k++; + } + power_state_offset += 2 + power_state->v2.ucNumDPMLevels; + } + adev->pm.dpm.num_ps = state_array->ucNumEntries; + + /* fill 
in the vce power states */
+	for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) {
+		u32 sclk, mclk;
+		clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
+		clock_info = (union pplib_clock_info *)
+			&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
+		sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
+		sclk |= clock_info->ci.ucEngineClockHigh << 16;
+		mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
+		mclk |= clock_info->ci.ucMemoryClockHigh << 16;
+		adev->pm.dpm.vce_states[i].sclk = sclk;
+		adev->pm.dpm.vce_states[i].mclk = mclk;
+	}
+
+	return 0;
+}
+
+static int ci_get_vbios_boot_values(struct amdgpu_device *adev,
+				    struct ci_vbios_boot_state *boot_state)
+{
+	struct amdgpu_mode_info *mode_info = &adev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
+	ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
+	u8 frev, crev;
+	u16 data_offset;
+
+	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+					  &frev, &crev, &data_offset)) {
+		firmware_info =
+			(ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
+						    data_offset);
+		boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
+		boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
+		boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
+		boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(adev);
+		boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(adev);
+		boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
+		boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);
+
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static void ci_dpm_fini(struct amdgpu_device *adev)
+{
+	int i;
+
+	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
+		kfree(adev->pm.dpm.ps[i].ps_priv);
+	}
+	kfree(adev->pm.dpm.ps);
+	kfree(adev->pm.dpm.priv);
+	kfree(adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
+	amdgpu_free_extended_power_table(adev);
+}
+
+/**
+ * ci_dpm_init_microcode - load ucode images from disk
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Use the firmware interface to load the ucode images into
+ * the driver (not loaded into hw).
+ * Returns 0 on success, error on failure.
+ */ +static int ci_dpm_init_microcode(struct amdgpu_device *adev) +{ + const char *chip_name; + char fw_name[30]; + int err; + + DRM_DEBUG("\n"); + + switch (adev->asic_type) { + case CHIP_BONAIRE: + chip_name = "bonaire"; + break; + case CHIP_HAWAII: + chip_name = "hawaii"; + break; + case CHIP_KAVERI: + case CHIP_KABINI: + default: BUG(); + } + + snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); + err = request_firmware(&adev->pm.fw, fw_name, adev->dev); + if (err) + goto out; + err = amdgpu_ucode_validate(adev->pm.fw); + +out: + if (err) { + printk(KERN_ERR + "cik_smc: Failed to load firmware \"%s\"\n", + fw_name); + release_firmware(adev->pm.fw); + adev->pm.fw = NULL; + } + return err; +} + +static int ci_dpm_init(struct amdgpu_device *adev) +{ + int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); + SMU7_Discrete_DpmTable *dpm_table; + struct amdgpu_gpio_rec gpio; + u16 data_offset, size; + u8 frev, crev; + struct ci_power_info *pi; + int ret; + u32 mask; + + ret = ci_dpm_init_microcode(adev); + if (ret) + return ret; + + pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL); + if (pi == NULL) + return -ENOMEM; + adev->pm.dpm.priv = pi; + + ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); + if (ret) + pi->sys_pcie_mask = 0; + else + pi->sys_pcie_mask = mask; + pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID; + + pi->pcie_gen_performance.max = AMDGPU_PCIE_GEN1; + pi->pcie_gen_performance.min = AMDGPU_PCIE_GEN3; + pi->pcie_gen_powersaving.max = AMDGPU_PCIE_GEN1; + pi->pcie_gen_powersaving.min = AMDGPU_PCIE_GEN3; + + pi->pcie_lane_performance.max = 0; + pi->pcie_lane_performance.min = 16; + pi->pcie_lane_powersaving.max = 0; + pi->pcie_lane_powersaving.min = 16; + + ret = ci_get_vbios_boot_values(adev, &pi->vbios_boot_state); + if (ret) { + ci_dpm_fini(adev); + return ret; + } + + ret = amdgpu_get_platform_caps(adev); + if (ret) { + ci_dpm_fini(adev); + return ret; + } + + ret = amdgpu_parse_extended_power_table(adev); + if (ret) { + ci_dpm_fini(adev); + return ret; + } + + ret = ci_parse_power_table(adev); + if (ret) { + ci_dpm_fini(adev); + return ret; + } + + pi->dll_default_on = false; + pi->sram_end = SMC_RAM_END; + + pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT; + pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT; + pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT; + pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT; + pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT; + pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT; + pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT; + pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT; + + pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT; + + pi->sclk_dpm_key_disabled = 0; + pi->mclk_dpm_key_disabled = 0; + pi->pcie_dpm_key_disabled = 0; + pi->thermal_sclk_dpm_enabled = 0; + + pi->caps_sclk_ds = true; + + pi->mclk_strobe_mode_threshold = 40000; + pi->mclk_stutter_mode_threshold = 40000; + pi->mclk_edc_enable_threshold = 40000; + pi->mclk_edc_wr_enable_threshold = 40000; + + ci_initialize_powertune_defaults(adev); + + pi->caps_fps = false; + + pi->caps_sclk_throttle_low_notification = false; + + pi->caps_uvd_dpm = true; + pi->caps_vce_dpm = true; + + ci_get_leakage_voltages(adev); + ci_patch_dependency_tables_with_leakage(adev); + ci_set_private_data_variables_based_on_pptable(adev); + + adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries = + kzalloc(4 * sizeof(struct amdgpu_clock_voltage_dependency_entry), GFP_KERNEL); + if 
(!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) { + ci_dpm_fini(adev); + return -ENOMEM; + } + adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4; + adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0; + adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0; + adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000; + adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720; + adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000; + adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810; + adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000; + adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900; + + adev->pm.dpm.dyn_state.mclk_sclk_ratio = 4; + adev->pm.dpm.dyn_state.sclk_mclk_delta = 15000; + adev->pm.dpm.dyn_state.vddc_vddci_delta = 200; + + adev->pm.dpm.dyn_state.valid_sclk_values.count = 0; + adev->pm.dpm.dyn_state.valid_sclk_values.values = NULL; + adev->pm.dpm.dyn_state.valid_mclk_values.count = 0; + adev->pm.dpm.dyn_state.valid_mclk_values.values = NULL; + + if (adev->asic_type == CHIP_HAWAII) { + pi->thermal_temp_setting.temperature_low = 94500; + pi->thermal_temp_setting.temperature_high = 95000; + pi->thermal_temp_setting.temperature_shutdown = 104000; + } else { + pi->thermal_temp_setting.temperature_low = 99500; + pi->thermal_temp_setting.temperature_high = 100000; + pi->thermal_temp_setting.temperature_shutdown = 104000; + } + + pi->uvd_enabled = false; + + dpm_table = &pi->smc_state_table; + + gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_VRHOT_GPIO_PINID); + if (gpio.valid) { + dpm_table->VRHotGpio = gpio.shift; + adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT; + } else { + dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN; + adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT; + } + + gpio = amdgpu_atombios_lookup_gpio(adev, PP_AC_DC_SWITCH_GPIO_PINID); + if (gpio.valid) { + dpm_table->AcDcGpio = gpio.shift; + adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC; + } else { + dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN; + adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC; + } + + gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_PCC_GPIO_PINID); + if (gpio.valid) { + u32 tmp = RREG32_SMC(ixCNB_PWRMGT_CNTL); + + switch (gpio.shift) { + case 0: + tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK; + tmp |= 1 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT; + break; + case 1: + tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK; + tmp |= 2 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT; + break; + case 2: + tmp |= CNB_PWRMGT_CNTL__GNB_SLOW_MASK; + break; + case 3: + tmp |= CNB_PWRMGT_CNTL__FORCE_NB_PS1_MASK; + break; + case 4: + tmp |= CNB_PWRMGT_CNTL__DPM_ENABLED_MASK; + break; + default: + DRM_ERROR("Invalid PCC GPIO: %u!\n", gpio.shift); + break; + } + WREG32_SMC(ixCNB_PWRMGT_CNTL, tmp); + } + + pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE; + pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE; + pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE; + if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT)) + pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO; + else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) + pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2; + + if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) { + if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT)) + 
pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO; + else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2)) + pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2; + else + adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL; + } + + if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) { + if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT)) + pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO; + else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2)) + pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2; + else + adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL; + } + + pi->vddc_phase_shed_control = true; + +#if defined(CONFIG_ACPI) + pi->pcie_performance_request = + amdgpu_acpi_is_pcie_performance_request_supported(adev); +#else + pi->pcie_performance_request = false; +#endif + + if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size, + &frev, &crev, &data_offset)) { + pi->caps_sclk_ss_support = true; + pi->caps_mclk_ss_support = true; + pi->dynamic_ss = true; + } else { + pi->caps_sclk_ss_support = false; + pi->caps_mclk_ss_support = false; + pi->dynamic_ss = true; + } + + if (adev->pm.int_thermal_type != THERMAL_TYPE_NONE) + pi->thermal_protection = true; + else + pi->thermal_protection = false; + + pi->caps_dynamic_ac_timing = true; + + pi->uvd_power_gated = false; + + /* make sure dc limits are valid */ + if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) || + (adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0)) + adev->pm.dpm.dyn_state.max_clock_voltage_on_dc = + adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; + + pi->fan_ctrl_is_in_default_mode = true; + + return 0; +} + +static void +ci_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev, + struct seq_file *m) +{ + struct ci_power_info *pi = ci_get_pi(adev); + struct amdgpu_ps *rps = &pi->current_rps; + u32 sclk = ci_get_average_sclk_freq(adev); + u32 mclk = ci_get_average_mclk_freq(adev); + + seq_printf(m, "uvd %sabled\n", pi->uvd_enabled ? "en" : "dis"); + seq_printf(m, "vce %sabled\n", rps->vce_active ? 
"en" : "dis"); + seq_printf(m, "power level avg sclk: %u mclk: %u\n", + sclk, mclk); +} + +static void ci_dpm_print_power_state(struct amdgpu_device *adev, + struct amdgpu_ps *rps) +{ + struct ci_ps *ps = ci_get_ps(rps); + struct ci_pl *pl; + int i; + + amdgpu_dpm_print_class_info(rps->class, rps->class2); + amdgpu_dpm_print_cap_info(rps->caps); + printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); + for (i = 0; i < ps->performance_level_count; i++) { + pl = &ps->performance_levels[i]; + printk("\t\tpower level %d sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n", + i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane); + } + amdgpu_dpm_print_ps_status(adev, rps); +} + +static u32 ci_dpm_get_sclk(struct amdgpu_device *adev, bool low) +{ + struct ci_power_info *pi = ci_get_pi(adev); + struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps); + + if (low) + return requested_state->performance_levels[0].sclk; + else + return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk; +} + +static u32 ci_dpm_get_mclk(struct amdgpu_device *adev, bool low) +{ + struct ci_power_info *pi = ci_get_pi(adev); + struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps); + + if (low) + return requested_state->performance_levels[0].mclk; + else + return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk; +} + +/* get temperature in millidegrees */ +static int ci_dpm_get_temp(struct amdgpu_device *adev) +{ + u32 temp; + int actual_temp = 0; + + temp = (RREG32_SMC(ixCG_MULT_THERMAL_STATUS) & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >> + CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT; + + if (temp & 0x200) + actual_temp = 255; + else + actual_temp = temp & 0x1ff; + + actual_temp = actual_temp * 1000; + + return actual_temp; +} + +static int ci_set_temperature_range(struct amdgpu_device *adev) +{ + int ret; + + ret = ci_thermal_enable_alert(adev, false); + if (ret) + return ret; + ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN, + CISLANDS_TEMP_RANGE_MAX); + if (ret) + return ret; + ret = ci_thermal_enable_alert(adev, true); + if (ret) + return ret; + return ret; +} + +static int ci_dpm_early_init(struct amdgpu_device *adev) +{ + ci_dpm_set_dpm_funcs(adev); + ci_dpm_set_irq_funcs(adev); + + return 0; +} + +static int ci_dpm_late_init(struct amdgpu_device *adev) +{ + int ret; + + if (!amdgpu_dpm) + return 0; + + ret = ci_set_temperature_range(adev); + if (ret) + return ret; + + ci_dpm_powergate_uvd(adev, true); + + return 0; +} + +static int ci_dpm_sw_init(struct amdgpu_device *adev) +{ + int ret; + + ret = amdgpu_irq_add_id(adev, 230, &adev->pm.dpm.thermal.irq); + if (ret) + return ret; + + ret = amdgpu_irq_add_id(adev, 231, &adev->pm.dpm.thermal.irq); + if (ret) + return ret; + + /* default to balanced state */ + adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED; + adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; + adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO; + adev->pm.default_sclk = adev->clock.default_sclk; + adev->pm.default_mclk = adev->clock.default_mclk; + adev->pm.current_sclk = adev->clock.default_sclk; + adev->pm.current_mclk = adev->clock.default_mclk; + adev->pm.int_thermal_type = THERMAL_TYPE_NONE; + + if (amdgpu_dpm == 0) + return 0; + + INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler); + mutex_lock(&adev->pm.mutex); + ret = ci_dpm_init(adev); + if (ret) + goto dpm_failed; + adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; + if (amdgpu_dpm 
== 1) + amdgpu_pm_print_power_states(adev); + ret = amdgpu_pm_sysfs_init(adev); + if (ret) + goto dpm_failed; + mutex_unlock(&adev->pm.mutex); + DRM_INFO("amdgpu: dpm initialized\n"); + + return 0; + +dpm_failed: + ci_dpm_fini(adev); + mutex_unlock(&adev->pm.mutex); + DRM_ERROR("amdgpu: dpm initialization failed\n"); + return ret; +} + +static int ci_dpm_sw_fini(struct amdgpu_device *adev) +{ + mutex_lock(&adev->pm.mutex); + amdgpu_pm_sysfs_fini(adev); + ci_dpm_fini(adev); + mutex_unlock(&adev->pm.mutex); + + return 0; +} + +static int ci_dpm_hw_init(struct amdgpu_device *adev) +{ + int ret; + + if (!amdgpu_dpm) + return 0; + + mutex_lock(&adev->pm.mutex); + ci_dpm_setup_asic(adev); + ret = ci_dpm_enable(adev); + if (ret) + adev->pm.dpm_enabled = false; + else + adev->pm.dpm_enabled = true; + mutex_unlock(&adev->pm.mutex); + + return ret; +} + +static int ci_dpm_hw_fini(struct amdgpu_device *adev) +{ + if (adev->pm.dpm_enabled) { + mutex_lock(&adev->pm.mutex); + ci_dpm_disable(adev); + mutex_unlock(&adev->pm.mutex); + } + + return 0; +} + +static int ci_dpm_suspend(struct amdgpu_device *adev) +{ + if (adev->pm.dpm_enabled) { + mutex_lock(&adev->pm.mutex); + /* disable dpm */ + ci_dpm_disable(adev); + /* reset the power state */ + adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; + mutex_unlock(&adev->pm.mutex); + } + return 0; +} + +static int ci_dpm_resume(struct amdgpu_device *adev) +{ + int ret; + + if (adev->pm.dpm_enabled) { + /* asic init will reset to the boot state */ + mutex_lock(&adev->pm.mutex); + ci_dpm_setup_asic(adev); + ret = ci_dpm_enable(adev); + if (ret) + adev->pm.dpm_enabled = false; + else + adev->pm.dpm_enabled = true; + mutex_unlock(&adev->pm.mutex); + if (adev->pm.dpm_enabled) + amdgpu_pm_compute_clocks(adev); + } + return 0; +} + +static bool ci_dpm_is_idle(struct amdgpu_device *adev) +{ + /* XXX */ + return true; +} + +static int ci_dpm_wait_for_idle(struct amdgpu_device *adev) +{ + /* XXX */ + return 0; +} + +static void ci_dpm_print_status(struct amdgpu_device *adev) +{ + dev_info(adev->dev, "CIK DPM registers\n"); + dev_info(adev->dev, " BIOS_SCRATCH_4=0x%08X\n", + RREG32(mmBIOS_SCRATCH_4)); + dev_info(adev->dev, " MC_ARB_DRAM_TIMING=0x%08X\n", + RREG32(mmMC_ARB_DRAM_TIMING)); + dev_info(adev->dev, " MC_ARB_DRAM_TIMING2=0x%08X\n", + RREG32(mmMC_ARB_DRAM_TIMING2)); + dev_info(adev->dev, " MC_ARB_BURST_TIME=0x%08X\n", + RREG32(mmMC_ARB_BURST_TIME)); + dev_info(adev->dev, " MC_ARB_DRAM_TIMING_1=0x%08X\n", + RREG32(mmMC_ARB_DRAM_TIMING_1)); + dev_info(adev->dev, " MC_ARB_DRAM_TIMING2_1=0x%08X\n", + RREG32(mmMC_ARB_DRAM_TIMING2_1)); + dev_info(adev->dev, " MC_CG_CONFIG=0x%08X\n", + RREG32(mmMC_CG_CONFIG)); + dev_info(adev->dev, " MC_ARB_CG=0x%08X\n", + RREG32(mmMC_ARB_CG)); + dev_info(adev->dev, " DIDT_SQ_CTRL0=0x%08X\n", + RREG32_DIDT(ixDIDT_SQ_CTRL0)); + dev_info(adev->dev, " DIDT_DB_CTRL0=0x%08X\n", + RREG32_DIDT(ixDIDT_DB_CTRL0)); + dev_info(adev->dev, " DIDT_TD_CTRL0=0x%08X\n", + RREG32_DIDT(ixDIDT_TD_CTRL0)); + dev_info(adev->dev, " DIDT_TCP_CTRL0=0x%08X\n", + RREG32_DIDT(ixDIDT_TCP_CTRL0)); + dev_info(adev->dev, " CG_THERMAL_INT=0x%08X\n", + RREG32_SMC(ixCG_THERMAL_INT)); + dev_info(adev->dev, " CG_THERMAL_CTRL=0x%08X\n", + RREG32_SMC(ixCG_THERMAL_CTRL)); + dev_info(adev->dev, " GENERAL_PWRMGT=0x%08X\n", + RREG32_SMC(ixGENERAL_PWRMGT)); + dev_info(adev->dev, " MC_SEQ_CNTL_3=0x%08X\n", + RREG32(mmMC_SEQ_CNTL_3)); + dev_info(adev->dev, " LCAC_MC0_CNTL=0x%08X\n", + RREG32_SMC(ixLCAC_MC0_CNTL)); + dev_info(adev->dev, " 
LCAC_MC1_CNTL=0x%08X\n", + RREG32_SMC(ixLCAC_MC1_CNTL)); + dev_info(adev->dev, " LCAC_CPL_CNTL=0x%08X\n", + RREG32_SMC(ixLCAC_CPL_CNTL)); + dev_info(adev->dev, " SCLK_PWRMGT_CNTL=0x%08X\n", + RREG32_SMC(ixSCLK_PWRMGT_CNTL)); + dev_info(adev->dev, " BIF_LNCNT_RESET=0x%08X\n", + RREG32(mmBIF_LNCNT_RESET)); + dev_info(adev->dev, " FIRMWARE_FLAGS=0x%08X\n", + RREG32_SMC(ixFIRMWARE_FLAGS)); + dev_info(adev->dev, " CG_SPLL_FUNC_CNTL=0x%08X\n", + RREG32_SMC(ixCG_SPLL_FUNC_CNTL)); + dev_info(adev->dev, " CG_SPLL_FUNC_CNTL_2=0x%08X\n", + RREG32_SMC(ixCG_SPLL_FUNC_CNTL_2)); + dev_info(adev->dev, " CG_SPLL_FUNC_CNTL_3=0x%08X\n", + RREG32_SMC(ixCG_SPLL_FUNC_CNTL_3)); + dev_info(adev->dev, " CG_SPLL_FUNC_CNTL_4=0x%08X\n", + RREG32_SMC(ixCG_SPLL_FUNC_CNTL_4)); + dev_info(adev->dev, " CG_SPLL_SPREAD_SPECTRUM=0x%08X\n", + RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM)); + dev_info(adev->dev, " CG_SPLL_SPREAD_SPECTRUM_2=0x%08X\n", + RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM_2)); + dev_info(adev->dev, " DLL_CNTL=0x%08X\n", + RREG32(mmDLL_CNTL)); + dev_info(adev->dev, " MCLK_PWRMGT_CNTL=0x%08X\n", + RREG32(mmMCLK_PWRMGT_CNTL)); + dev_info(adev->dev, " MPLL_AD_FUNC_CNTL=0x%08X\n", + RREG32(mmMPLL_AD_FUNC_CNTL)); + dev_info(adev->dev, " MPLL_DQ_FUNC_CNTL=0x%08X\n", + RREG32(mmMPLL_DQ_FUNC_CNTL)); + dev_info(adev->dev, " MPLL_FUNC_CNTL=0x%08X\n", + RREG32(mmMPLL_FUNC_CNTL)); + dev_info(adev->dev, " MPLL_FUNC_CNTL_1=0x%08X\n", + RREG32(mmMPLL_FUNC_CNTL_1)); + dev_info(adev->dev, " MPLL_FUNC_CNTL_2=0x%08X\n", + RREG32(mmMPLL_FUNC_CNTL_2)); + dev_info(adev->dev, " MPLL_SS1=0x%08X\n", + RREG32(mmMPLL_SS1)); + dev_info(adev->dev, " MPLL_SS2=0x%08X\n", + RREG32(mmMPLL_SS2)); + dev_info(adev->dev, " CG_DISPLAY_GAP_CNTL=0x%08X\n", + RREG32_SMC(ixCG_DISPLAY_GAP_CNTL)); + dev_info(adev->dev, " CG_DISPLAY_GAP_CNTL2=0x%08X\n", + RREG32_SMC(ixCG_DISPLAY_GAP_CNTL2)); + dev_info(adev->dev, " CG_STATIC_SCREEN_PARAMETER=0x%08X\n", + RREG32_SMC(ixCG_STATIC_SCREEN_PARAMETER)); + dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_0=0x%08X\n", + RREG32_SMC(ixCG_FREQ_TRAN_VOTING_0)); + dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_1=0x%08X\n", + RREG32_SMC(ixCG_FREQ_TRAN_VOTING_1)); + dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_2=0x%08X\n", + RREG32_SMC(ixCG_FREQ_TRAN_VOTING_2)); + dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_3=0x%08X\n", + RREG32_SMC(ixCG_FREQ_TRAN_VOTING_3)); + dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_4=0x%08X\n", + RREG32_SMC(ixCG_FREQ_TRAN_VOTING_4)); + dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_5=0x%08X\n", + RREG32_SMC(ixCG_FREQ_TRAN_VOTING_5)); + dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_6=0x%08X\n", + RREG32_SMC(ixCG_FREQ_TRAN_VOTING_6)); + dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_7=0x%08X\n", + RREG32_SMC(ixCG_FREQ_TRAN_VOTING_7)); + dev_info(adev->dev, " RCU_UC_EVENTS=0x%08X\n", + RREG32_SMC(ixRCU_UC_EVENTS)); + dev_info(adev->dev, " DPM_TABLE_475=0x%08X\n", + RREG32_SMC(ixDPM_TABLE_475)); + dev_info(adev->dev, " MC_SEQ_RAS_TIMING_LP=0x%08X\n", + RREG32(mmMC_SEQ_RAS_TIMING_LP)); + dev_info(adev->dev, " MC_SEQ_RAS_TIMING=0x%08X\n", + RREG32(mmMC_SEQ_RAS_TIMING)); + dev_info(adev->dev, " MC_SEQ_CAS_TIMING_LP=0x%08X\n", + RREG32(mmMC_SEQ_CAS_TIMING_LP)); + dev_info(adev->dev, " MC_SEQ_CAS_TIMING=0x%08X\n", + RREG32(mmMC_SEQ_CAS_TIMING)); + dev_info(adev->dev, " MC_SEQ_DLL_STBY_LP=0x%08X\n", + RREG32(mmMC_SEQ_DLL_STBY_LP)); + dev_info(adev->dev, " MC_SEQ_DLL_STBY=0x%08X\n", + RREG32(mmMC_SEQ_DLL_STBY)); + dev_info(adev->dev, " MC_SEQ_G5PDX_CMD0_LP=0x%08X\n", + RREG32(mmMC_SEQ_G5PDX_CMD0_LP)); + dev_info(adev->dev, " 
MC_SEQ_G5PDX_CMD0=0x%08X\n", + RREG32(mmMC_SEQ_G5PDX_CMD0)); + dev_info(adev->dev, " MC_SEQ_G5PDX_CMD1_LP=0x%08X\n", + RREG32(mmMC_SEQ_G5PDX_CMD1_LP)); + dev_info(adev->dev, " MC_SEQ_G5PDX_CMD1=0x%08X\n", + RREG32(mmMC_SEQ_G5PDX_CMD1)); + dev_info(adev->dev, " MC_SEQ_G5PDX_CTRL_LP=0x%08X\n", + RREG32(mmMC_SEQ_G5PDX_CTRL_LP)); + dev_info(adev->dev, " MC_SEQ_G5PDX_CTRL=0x%08X\n", + RREG32(mmMC_SEQ_G5PDX_CTRL)); + dev_info(adev->dev, " MC_SEQ_PMG_DVS_CMD_LP=0x%08X\n", + RREG32(mmMC_SEQ_PMG_DVS_CMD_LP)); + dev_info(adev->dev, " MC_SEQ_PMG_DVS_CMD=0x%08X\n", + RREG32(mmMC_SEQ_PMG_DVS_CMD)); + dev_info(adev->dev, " MC_SEQ_PMG_DVS_CTL_LP=0x%08X\n", + RREG32(mmMC_SEQ_PMG_DVS_CTL_LP)); + dev_info(adev->dev, " MC_SEQ_PMG_DVS_CTL=0x%08X\n", + RREG32(mmMC_SEQ_PMG_DVS_CTL)); + dev_info(adev->dev, " MC_SEQ_MISC_TIMING_LP=0x%08X\n", + RREG32(mmMC_SEQ_MISC_TIMING_LP)); + dev_info(adev->dev, " MC_SEQ_MISC_TIMING=0x%08X\n", + RREG32(mmMC_SEQ_MISC_TIMING)); + dev_info(adev->dev, " MC_SEQ_MISC_TIMING2_LP=0x%08X\n", + RREG32(mmMC_SEQ_MISC_TIMING2_LP)); + dev_info(adev->dev, " MC_SEQ_MISC_TIMING2=0x%08X\n", + RREG32(mmMC_SEQ_MISC_TIMING2)); + dev_info(adev->dev, " MC_SEQ_PMG_CMD_EMRS_LP=0x%08X\n", + RREG32(mmMC_SEQ_PMG_CMD_EMRS_LP)); + dev_info(adev->dev, " MC_PMG_CMD_EMRS=0x%08X\n", + RREG32(mmMC_PMG_CMD_EMRS)); + dev_info(adev->dev, " MC_SEQ_PMG_CMD_MRS_LP=0x%08X\n", + RREG32(mmMC_SEQ_PMG_CMD_MRS_LP)); + dev_info(adev->dev, " MC_PMG_CMD_MRS=0x%08X\n", + RREG32(mmMC_PMG_CMD_MRS)); + dev_info(adev->dev, " MC_SEQ_PMG_CMD_MRS1_LP=0x%08X\n", + RREG32(mmMC_SEQ_PMG_CMD_MRS1_LP)); + dev_info(adev->dev, " MC_PMG_CMD_MRS1=0x%08X\n", + RREG32(mmMC_PMG_CMD_MRS1)); + dev_info(adev->dev, " MC_SEQ_WR_CTL_D0_LP=0x%08X\n", + RREG32(mmMC_SEQ_WR_CTL_D0_LP)); + dev_info(adev->dev, " MC_SEQ_WR_CTL_D0=0x%08X\n", + RREG32(mmMC_SEQ_WR_CTL_D0)); + dev_info(adev->dev, " MC_SEQ_WR_CTL_D1_LP=0x%08X\n", + RREG32(mmMC_SEQ_WR_CTL_D1_LP)); + dev_info(adev->dev, " MC_SEQ_WR_CTL_D1=0x%08X\n", + RREG32(mmMC_SEQ_WR_CTL_D1)); + dev_info(adev->dev, " MC_SEQ_RD_CTL_D0_LP=0x%08X\n", + RREG32(mmMC_SEQ_RD_CTL_D0_LP)); + dev_info(adev->dev, " MC_SEQ_RD_CTL_D0=0x%08X\n", + RREG32(mmMC_SEQ_RD_CTL_D0)); + dev_info(adev->dev, " MC_SEQ_RD_CTL_D1_LP=0x%08X\n", + RREG32(mmMC_SEQ_RD_CTL_D1_LP)); + dev_info(adev->dev, " MC_SEQ_RD_CTL_D1=0x%08X\n", + RREG32(mmMC_SEQ_RD_CTL_D1)); + dev_info(adev->dev, " MC_SEQ_PMG_TIMING_LP=0x%08X\n", + RREG32(mmMC_SEQ_PMG_TIMING_LP)); + dev_info(adev->dev, " MC_SEQ_PMG_TIMING=0x%08X\n", + RREG32(mmMC_SEQ_PMG_TIMING)); + dev_info(adev->dev, " MC_SEQ_PMG_CMD_MRS2_LP=0x%08X\n", + RREG32(mmMC_SEQ_PMG_CMD_MRS2_LP)); + dev_info(adev->dev, " MC_PMG_CMD_MRS2=0x%08X\n", + RREG32(mmMC_PMG_CMD_MRS2)); + dev_info(adev->dev, " MC_SEQ_WR_CTL_2_LP=0x%08X\n", + RREG32(mmMC_SEQ_WR_CTL_2_LP)); + dev_info(adev->dev, " MC_SEQ_WR_CTL_2=0x%08X\n", + RREG32(mmMC_SEQ_WR_CTL_2)); + dev_info(adev->dev, " PCIE_LC_SPEED_CNTL=0x%08X\n", + RREG32_PCIE(ixPCIE_LC_SPEED_CNTL)); + dev_info(adev->dev, " PCIE_LC_LINK_WIDTH_CNTL=0x%08X\n", + RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL)); + dev_info(adev->dev, " SMC_IND_INDEX_0=0x%08X\n", + RREG32(mmSMC_IND_INDEX_0)); + dev_info(adev->dev, " SMC_IND_DATA_0=0x%08X\n", + RREG32(mmSMC_IND_DATA_0)); + dev_info(adev->dev, " SMC_IND_ACCESS_CNTL=0x%08X\n", + RREG32(mmSMC_IND_ACCESS_CNTL)); + dev_info(adev->dev, " SMC_RESP_0=0x%08X\n", + RREG32(mmSMC_RESP_0)); + dev_info(adev->dev, " SMC_MESSAGE_0=0x%08X\n", + RREG32(mmSMC_MESSAGE_0)); + dev_info(adev->dev, " SMC_SYSCON_RESET_CNTL=0x%08X\n", + 
RREG32_SMC(ixSMC_SYSCON_RESET_CNTL)); + dev_info(adev->dev, " SMC_SYSCON_CLOCK_CNTL_0=0x%08X\n", + RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0)); + dev_info(adev->dev, " SMC_SYSCON_MISC_CNTL=0x%08X\n", + RREG32_SMC(ixSMC_SYSCON_MISC_CNTL)); + dev_info(adev->dev, " SMC_PC_C=0x%08X\n", + RREG32_SMC(ixSMC_PC_C)); +} + +static int ci_dpm_soft_reset(struct amdgpu_device *adev) +{ + return 0; +} + +static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned type, + enum amdgpu_interrupt_state state) +{ + u32 cg_thermal_int; + + switch (type) { + case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH: + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); + cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; + WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); + break; + case AMDGPU_IRQ_STATE_ENABLE: + cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); + cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; + WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); + break; + default: + break; + } + break; + + case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW: + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); + cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; + WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); + break; + case AMDGPU_IRQ_STATE_ENABLE: + cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); + cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; + WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); + break; + default: + break; + } + break; + + default: + break; + } + return 0; +} + +static int ci_dpm_process_interrupt(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + bool queue_thermal = false; + + if (entry == NULL) + return -EINVAL; + + switch (entry->src_id) { + case 230: /* thermal low to high */ + DRM_DEBUG("IH: thermal low to high\n"); + adev->pm.dpm.thermal.high_to_low = false; + queue_thermal = true; + break; + case 231: /* thermal high to low */ + DRM_DEBUG("IH: thermal high to low\n"); + adev->pm.dpm.thermal.high_to_low = true; + queue_thermal = true; + break; + default: + break; + } + + if (queue_thermal) + schedule_work(&adev->pm.dpm.thermal.work); + + return 0; +} + +static int ci_dpm_set_clockgating_state(struct amdgpu_device *adev, + enum amdgpu_clockgating_state state) +{ + return 0; +} + +static int ci_dpm_set_powergating_state(struct amdgpu_device *adev, + enum amdgpu_powergating_state state) +{ + return 0; +} + +const struct amdgpu_ip_funcs ci_dpm_ip_funcs = { + .early_init = ci_dpm_early_init, + .late_init = ci_dpm_late_init, + .sw_init = ci_dpm_sw_init, + .sw_fini = ci_dpm_sw_fini, + .hw_init = ci_dpm_hw_init, + .hw_fini = ci_dpm_hw_fini, + .suspend = ci_dpm_suspend, + .resume = ci_dpm_resume, + .is_idle = ci_dpm_is_idle, + .wait_for_idle = ci_dpm_wait_for_idle, + .soft_reset = ci_dpm_soft_reset, + .print_status = ci_dpm_print_status, + .set_clockgating_state = ci_dpm_set_clockgating_state, + .set_powergating_state = ci_dpm_set_powergating_state, +}; + +static const struct amdgpu_dpm_funcs ci_dpm_funcs = { + .get_temperature = &ci_dpm_get_temp, + .pre_set_power_state = &ci_dpm_pre_set_power_state, + .set_power_state = &ci_dpm_set_power_state, + .post_set_power_state = &ci_dpm_post_set_power_state, + .display_configuration_changed = &ci_dpm_display_configuration_changed, + .get_sclk = &ci_dpm_get_sclk, + .get_mclk = &ci_dpm_get_mclk, + .print_power_state = &ci_dpm_print_power_state, + 
.debugfs_print_current_performance_level = &ci_dpm_debugfs_print_current_performance_level, + .force_performance_level = &ci_dpm_force_performance_level, + .vblank_too_short = &ci_dpm_vblank_too_short, + .powergate_uvd = &ci_dpm_powergate_uvd, + .set_fan_control_mode = &ci_dpm_set_fan_control_mode, + .get_fan_control_mode = &ci_dpm_get_fan_control_mode, + .set_fan_speed_percent = &ci_dpm_set_fan_speed_percent, + .get_fan_speed_percent = &ci_dpm_get_fan_speed_percent, +}; + +static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev) +{ + if (adev->pm.funcs == NULL) + adev->pm.funcs = &ci_dpm_funcs; +} + +static const struct amdgpu_irq_src_funcs ci_dpm_irq_funcs = { + .set = ci_dpm_set_interrupt_state, + .process = ci_dpm_process_interrupt, +}; + +static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST; + adev->pm.dpm.thermal.irq.funcs = &ci_dpm_irq_funcs; +} diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.h b/drivers/gpu/drm/amd/amdgpu/ci_dpm.h new file mode 100644 index 000000000000..faccc30c93bf --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.h @@ -0,0 +1,348 @@ +/* + * Copyright 2013 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef __CI_DPM_H__ +#define __CI_DPM_H__ + +#include "amdgpu_atombios.h" +#include "ppsmc.h" + +#define SMU__NUM_SCLK_DPM_STATE 8 +#define SMU__NUM_MCLK_DPM_LEVELS 6 +#define SMU__NUM_LCLK_DPM_LEVELS 8 +#define SMU__NUM_PCIE_DPM_LEVELS 8 +#include "smu7_discrete.h" + +#define CISLANDS_MAX_HARDWARE_POWERLEVELS 2 + +#define CISLANDS_UNUSED_GPIO_PIN 0x7F + +struct ci_pl { + u32 mclk; + u32 sclk; + enum amdgpu_pcie_gen pcie_gen; + u16 pcie_lane; +}; + +struct ci_ps { + u16 performance_level_count; + bool dc_compatible; + u32 sclk_t; + struct ci_pl performance_levels[CISLANDS_MAX_HARDWARE_POWERLEVELS]; +}; + +struct ci_dpm_level { + bool enabled; + u32 value; + u32 param1; +}; + +#define CISLAND_MAX_DEEPSLEEP_DIVIDER_ID 5 +#define MAX_REGULAR_DPM_NUMBER 8 +#define CISLAND_MINIMUM_ENGINE_CLOCK 800 + +struct ci_single_dpm_table { + u32 count; + struct ci_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER]; +}; + +struct ci_dpm_table { + struct ci_single_dpm_table sclk_table; + struct ci_single_dpm_table mclk_table; + struct ci_single_dpm_table pcie_speed_table; + struct ci_single_dpm_table vddc_table; + struct ci_single_dpm_table vddci_table; + struct ci_single_dpm_table mvdd_table; +}; + +struct ci_mc_reg_entry { + u32 mclk_max; + u32 mc_data[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE]; +}; + +struct ci_mc_reg_table { + u8 last; + u8 num_entries; + u16 valid_flag; + struct ci_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES]; + SMU7_Discrete_MCRegisterAddress mc_reg_address[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE]; +}; + +struct ci_ulv_parm +{ + bool supported; + u32 cg_ulv_parameter; + u32 volt_change_delay; + struct ci_pl pl; +}; + +#define CISLANDS_MAX_LEAKAGE_COUNT 8 + +struct ci_leakage_voltage { + u16 count; + u16 leakage_id[CISLANDS_MAX_LEAKAGE_COUNT]; + u16 actual_voltage[CISLANDS_MAX_LEAKAGE_COUNT]; +}; + +struct ci_dpm_level_enable_mask { + u32 uvd_dpm_enable_mask; + u32 vce_dpm_enable_mask; + u32 acp_dpm_enable_mask; + u32 samu_dpm_enable_mask; + u32 sclk_dpm_enable_mask; + u32 mclk_dpm_enable_mask; + u32 pcie_dpm_enable_mask; +}; + +struct ci_vbios_boot_state +{ + u16 mvdd_bootup_value; + u16 vddc_bootup_value; + u16 vddci_bootup_value; + u32 sclk_bootup_value; + u32 mclk_bootup_value; + u16 pcie_gen_bootup_value; + u16 pcie_lane_bootup_value; +}; + +struct ci_clock_registers { + u32 cg_spll_func_cntl; + u32 cg_spll_func_cntl_2; + u32 cg_spll_func_cntl_3; + u32 cg_spll_func_cntl_4; + u32 cg_spll_spread_spectrum; + u32 cg_spll_spread_spectrum_2; + u32 dll_cntl; + u32 mclk_pwrmgt_cntl; + u32 mpll_ad_func_cntl; + u32 mpll_dq_func_cntl; + u32 mpll_func_cntl; + u32 mpll_func_cntl_1; + u32 mpll_func_cntl_2; + u32 mpll_ss1; + u32 mpll_ss2; +}; + +struct ci_thermal_temperature_setting { + s32 temperature_low; + s32 temperature_high; + s32 temperature_shutdown; +}; + +struct ci_pcie_perf_range { + u16 max; + u16 min; +}; + +enum ci_pt_config_reg_type { + CISLANDS_CONFIGREG_MMR = 0, + CISLANDS_CONFIGREG_SMC_IND, + CISLANDS_CONFIGREG_DIDT_IND, + CISLANDS_CONFIGREG_CACHE, + CISLANDS_CONFIGREG_MAX +}; + +#define POWERCONTAINMENT_FEATURE_BAPM 0x00000001 +#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002 +#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004 + +struct ci_pt_config_reg { + u32 offset; + u32 mask; + u32 shift; + u32 value; + enum ci_pt_config_reg_type type; +}; + +struct ci_pt_defaults { + u8 svi_load_line_en; + u8 svi_load_line_vddc; + u8 tdc_vddc_throttle_release_limit_perc; + u8 tdc_mawt; + u8 tdc_waterfall_ctl; + u8 dte_ambient_temp_base; + u32 display_cac; + u32 
bapm_temp_gradient; + u16 bapmti_r[SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES * SMU7_DTE_SINKS]; + u16 bapmti_rc[SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES * SMU7_DTE_SINKS]; +}; + +#define DPMTABLE_OD_UPDATE_SCLK 0x00000001 +#define DPMTABLE_OD_UPDATE_MCLK 0x00000002 +#define DPMTABLE_UPDATE_SCLK 0x00000004 +#define DPMTABLE_UPDATE_MCLK 0x00000008 + +struct ci_power_info { + struct ci_dpm_table dpm_table; + u32 voltage_control; + u32 mvdd_control; + u32 vddci_control; + u32 active_auto_throttle_sources; + struct ci_clock_registers clock_registers; + u16 acpi_vddc; + u16 acpi_vddci; + enum amdgpu_pcie_gen force_pcie_gen; + enum amdgpu_pcie_gen acpi_pcie_gen; + struct ci_leakage_voltage vddc_leakage; + struct ci_leakage_voltage vddci_leakage; + u16 max_vddc_in_pp_table; + u16 min_vddc_in_pp_table; + u16 max_vddci_in_pp_table; + u16 min_vddci_in_pp_table; + u32 mclk_strobe_mode_threshold; + u32 mclk_stutter_mode_threshold; + u32 mclk_edc_enable_threshold; + u32 mclk_edc_wr_enable_threshold; + struct ci_vbios_boot_state vbios_boot_state; + /* smc offsets */ + u32 sram_end; + u32 dpm_table_start; + u32 soft_regs_start; + u32 mc_reg_table_start; + u32 fan_table_start; + u32 arb_table_start; + /* smc tables */ + SMU7_Discrete_DpmTable smc_state_table; + SMU7_Discrete_MCRegisters smc_mc_reg_table; + SMU7_Discrete_PmFuses smc_powertune_table; + /* other stuff */ + struct ci_mc_reg_table mc_reg_table; + struct atom_voltage_table vddc_voltage_table; + struct atom_voltage_table vddci_voltage_table; + struct atom_voltage_table mvdd_voltage_table; + struct ci_ulv_parm ulv; + u32 power_containment_features; + const struct ci_pt_defaults *powertune_defaults; + u32 dte_tj_offset; + bool vddc_phase_shed_control; + struct ci_thermal_temperature_setting thermal_temp_setting; + struct ci_dpm_level_enable_mask dpm_level_enable_mask; + u32 need_update_smu7_dpm_table; + u32 sclk_dpm_key_disabled; + u32 mclk_dpm_key_disabled; + u32 pcie_dpm_key_disabled; + u32 thermal_sclk_dpm_enabled; + struct ci_pcie_perf_range pcie_gen_performance; + struct ci_pcie_perf_range pcie_lane_performance; + struct ci_pcie_perf_range pcie_gen_powersaving; + struct ci_pcie_perf_range pcie_lane_powersaving; + u32 activity_target[SMU7_MAX_LEVELS_GRAPHICS]; + u32 mclk_activity_target; + u32 low_sclk_interrupt_t; + u32 last_mclk_dpm_enable_mask; + u32 sys_pcie_mask; + /* caps */ + bool caps_power_containment; + bool caps_cac; + bool caps_sq_ramping; + bool caps_db_ramping; + bool caps_td_ramping; + bool caps_tcp_ramping; + bool caps_fps; + bool caps_sclk_ds; + bool caps_sclk_ss_support; + bool caps_mclk_ss_support; + bool caps_uvd_dpm; + bool caps_vce_dpm; + bool caps_samu_dpm; + bool caps_acp_dpm; + bool caps_automatic_dc_transition; + bool caps_sclk_throttle_low_notification; + bool caps_dynamic_ac_timing; + bool caps_od_fuzzy_fan_control_support; + /* flags */ + bool thermal_protection; + bool pcie_performance_request; + bool dynamic_ss; + bool dll_default_on; + bool cac_enabled; + bool uvd_enabled; + bool battery_state; + bool pspp_notify_required; + bool enable_bapm_feature; + bool enable_tdc_limit_feature; + bool enable_pkg_pwr_tracking_feature; + bool use_pcie_performance_levels; + bool use_pcie_powersaving_levels; + bool uvd_power_gated; + /* driver states */ + struct amdgpu_ps current_rps; + struct ci_ps current_ps; + struct amdgpu_ps requested_rps; + struct ci_ps requested_ps; + /* fan control */ + bool fan_ctrl_is_in_default_mode; + bool fan_is_controlled_by_smc; + u32 t_min; + u32 fan_ctrl_default_mode; +}; + +#define 
CISLANDS_VOLTAGE_CONTROL_NONE 0x0 +#define CISLANDS_VOLTAGE_CONTROL_BY_GPIO 0x1 +#define CISLANDS_VOLTAGE_CONTROL_BY_SVID2 0x2 + +#define CISLANDS_Q88_FORMAT_CONVERSION_UNIT 256 + +#define CISLANDS_VRC_DFLT0 0x3FFFC000 +#define CISLANDS_VRC_DFLT1 0x000400 +#define CISLANDS_VRC_DFLT2 0xC00080 +#define CISLANDS_VRC_DFLT3 0xC00200 +#define CISLANDS_VRC_DFLT4 0xC01680 +#define CISLANDS_VRC_DFLT5 0xC00033 +#define CISLANDS_VRC_DFLT6 0xC00033 +#define CISLANDS_VRC_DFLT7 0x3FFFC000 + +#define CISLANDS_CGULVPARAMETER_DFLT 0x00040035 +#define CISLAND_TARGETACTIVITY_DFLT 30 +#define CISLAND_MCLK_TARGETACTIVITY_DFLT 10 + +#define PCIE_PERF_REQ_REMOVE_REGISTRY 0 +#define PCIE_PERF_REQ_FORCE_LOWPOWER 1 +#define PCIE_PERF_REQ_PECI_GEN1 2 +#define PCIE_PERF_REQ_PECI_GEN2 3 +#define PCIE_PERF_REQ_PECI_GEN3 4 + +#define CISLANDS_SSTU_DFLT 0 +#define CISLANDS_SST_DFLT 0x00C8 + +/* XXX are these ok? */ +#define CISLANDS_TEMP_RANGE_MIN (90 * 1000) +#define CISLANDS_TEMP_RANGE_MAX (120 * 1000) + +int amdgpu_ci_copy_bytes_to_smc(struct amdgpu_device *adev, + u32 smc_start_address, + const u8 *src, u32 byte_count, u32 limit); +void amdgpu_ci_start_smc(struct amdgpu_device *adev); +void amdgpu_ci_reset_smc(struct amdgpu_device *adev); +int amdgpu_ci_program_jump_on_start(struct amdgpu_device *adev); +void amdgpu_ci_stop_smc_clock(struct amdgpu_device *adev); +void amdgpu_ci_start_smc_clock(struct amdgpu_device *adev); +bool amdgpu_ci_is_smc_running(struct amdgpu_device *adev); +PPSMC_Result amdgpu_ci_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg); +PPSMC_Result amdgpu_ci_wait_for_smc_inactive(struct amdgpu_device *adev); +int amdgpu_ci_load_smc_ucode(struct amdgpu_device *adev, u32 limit); +int amdgpu_ci_read_smc_sram_dword(struct amdgpu_device *adev, + u32 smc_address, u32 *value, u32 limit); +int amdgpu_ci_write_smc_sram_dword(struct amdgpu_device *adev, + u32 smc_address, u32 value, u32 limit); + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/ci_smc.c b/drivers/gpu/drm/amd/amdgpu/ci_smc.c new file mode 100644 index 000000000000..7eb9069db8e3 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/ci_smc.c @@ -0,0 +1,279 @@ +/* + * Copyright 2011 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Alex Deucher + */ + +#include +#include "drmP.h" +#include "amdgpu.h" +#include "cikd.h" +#include "ppsmc.h" +#include "amdgpu_ucode.h" +#include "ci_dpm.h" + +#include "smu/smu_7_0_1_d.h" +#include "smu/smu_7_0_1_sh_mask.h" + +static int ci_set_smc_sram_address(struct amdgpu_device *adev, + u32 smc_address, u32 limit) +{ + if (smc_address & 3) + return -EINVAL; + if ((smc_address + 3) > limit) + return -EINVAL; + + WREG32(mmSMC_IND_INDEX_0, smc_address); + WREG32_P(mmSMC_IND_ACCESS_CNTL, 0, ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK); + + return 0; +} + +int amdgpu_ci_copy_bytes_to_smc(struct amdgpu_device *adev, + u32 smc_start_address, + const u8 *src, u32 byte_count, u32 limit) +{ + unsigned long flags; + u32 data, original_data; + u32 addr; + u32 extra_shift; + int ret = 0; + + if (smc_start_address & 3) + return -EINVAL; + if ((smc_start_address + byte_count) > limit) + return -EINVAL; + + addr = smc_start_address; + + spin_lock_irqsave(&adev->smc_idx_lock, flags); + while (byte_count >= 4) { + /* SMC address space is BE */ + data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; + + ret = ci_set_smc_sram_address(adev, addr, limit); + if (ret) + goto done; + + WREG32(mmSMC_IND_DATA_0, data); + + src += 4; + byte_count -= 4; + addr += 4; + } + + /* RMW for the final bytes */ + if (byte_count > 0) { + data = 0; + + ret = ci_set_smc_sram_address(adev, addr, limit); + if (ret) + goto done; + + original_data = RREG32(mmSMC_IND_DATA_0); + + extra_shift = 8 * (4 - byte_count); + + while (byte_count > 0) { + data = (data << 8) + *src++; + byte_count--; + } + + data <<= extra_shift; + + data |= (original_data & ~((~0UL) << extra_shift)); + + ret = ci_set_smc_sram_address(adev, addr, limit); + if (ret) + goto done; + + WREG32(mmSMC_IND_DATA_0, data); + } + +done: + spin_unlock_irqrestore(&adev->smc_idx_lock, flags); + + return ret; +} + +void amdgpu_ci_start_smc(struct amdgpu_device *adev) +{ + u32 tmp = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); + + tmp &= ~SMC_SYSCON_RESET_CNTL__rst_reg_MASK; + WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, tmp); +} + +void amdgpu_ci_reset_smc(struct amdgpu_device *adev) +{ + u32 tmp = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); + + tmp |= SMC_SYSCON_RESET_CNTL__rst_reg_MASK; + WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, tmp); +} + +int amdgpu_ci_program_jump_on_start(struct amdgpu_device *adev) +{ + static u8 data[] = { 0xE0, 0x00, 0x80, 0x40 }; + + return amdgpu_ci_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1); +} + +void amdgpu_ci_stop_smc_clock(struct amdgpu_device *adev) +{ + u32 tmp = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); + + tmp |= SMC_SYSCON_CLOCK_CNTL_0__ck_disable_MASK; + + WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, tmp); +} + +void amdgpu_ci_start_smc_clock(struct amdgpu_device *adev) +{ + u32 tmp = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); + + tmp &= ~SMC_SYSCON_CLOCK_CNTL_0__ck_disable_MASK; + + WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, tmp); +} + +bool amdgpu_ci_is_smc_running(struct amdgpu_device *adev) +{ + u32 clk = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); + u32 pc_c = RREG32_SMC(ixSMC_PC_C); + + if (!(clk & SMC_SYSCON_CLOCK_CNTL_0__ck_disable_MASK) && (0x20100 <= pc_c)) + return true; + + return false; +} + +PPSMC_Result amdgpu_ci_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg) +{ + u32 tmp; + int i; + + if (!amdgpu_ci_is_smc_running(adev)) + return PPSMC_Result_Failed; + + WREG32(mmSMC_MESSAGE_0, msg); + + for (i = 0; i < adev->usec_timeout; i++) { + tmp = RREG32(mmSMC_RESP_0); + if (tmp != 0) + break; + udelay(1); + } + tmp = 
RREG32(mmSMC_RESP_0); + + return (PPSMC_Result)tmp; +} + +PPSMC_Result amdgpu_ci_wait_for_smc_inactive(struct amdgpu_device *adev) +{ + u32 tmp; + int i; + + if (!amdgpu_ci_is_smc_running(adev)) + return PPSMC_Result_OK; + + for (i = 0; i < adev->usec_timeout; i++) { + tmp = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); + if ((tmp & SMC_SYSCON_CLOCK_CNTL_0__cken_MASK) == 0) + break; + udelay(1); + } + + return PPSMC_Result_OK; +} + +int amdgpu_ci_load_smc_ucode(struct amdgpu_device *adev, u32 limit) +{ + const struct smc_firmware_header_v1_0 *hdr; + unsigned long flags; + u32 ucode_start_address; + u32 ucode_size; + const u8 *src; + u32 data; + + if (!adev->pm.fw) + return -EINVAL; + + hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data; + amdgpu_ucode_print_smc_hdr(&hdr->header); + + adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version); + ucode_start_address = le32_to_cpu(hdr->ucode_start_addr); + ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes); + src = (const u8 *) + (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + + if (ucode_size & 3) + return -EINVAL; + + spin_lock_irqsave(&adev->smc_idx_lock, flags); + WREG32(mmSMC_IND_INDEX_0, ucode_start_address); + WREG32_P(mmSMC_IND_ACCESS_CNTL, SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK, + ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK); + while (ucode_size >= 4) { + /* SMC address space is BE */ + data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; + + WREG32(mmSMC_IND_DATA_0, data); + + src += 4; + ucode_size -= 4; + } + WREG32_P(mmSMC_IND_ACCESS_CNTL, 0, ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK); + spin_unlock_irqrestore(&adev->smc_idx_lock, flags); + + return 0; +} + +int amdgpu_ci_read_smc_sram_dword(struct amdgpu_device *adev, + u32 smc_address, u32 *value, u32 limit) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&adev->smc_idx_lock, flags); + ret = ci_set_smc_sram_address(adev, smc_address, limit); + if (ret == 0) + *value = RREG32(mmSMC_IND_DATA_0); + spin_unlock_irqrestore(&adev->smc_idx_lock, flags); + + return ret; +} + +int amdgpu_ci_write_smc_sram_dword(struct amdgpu_device *adev, + u32 smc_address, u32 value, u32 limit) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&adev->smc_idx_lock, flags); + ret = ci_set_smc_sram_address(adev, smc_address, limit); + if (ret == 0) + WREG32(mmSMC_IND_DATA_0, value); + spin_unlock_irqrestore(&adev->smc_idx_lock, flags); + + return ret; +} diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c new file mode 100644 index 000000000000..74ce0be2fbb7 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -0,0 +1,2505 @@ +/* + * Copyright 2012 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Alex Deucher + */ +#include +#include +#include +#include "drmP.h" +#include "amdgpu.h" +#include "amdgpu_atombios.h" +#include "amdgpu_ih.h" +#include "amdgpu_uvd.h" +#include "amdgpu_vce.h" +#include "cikd.h" +#include "atom.h" + +#include "cik.h" +#include "gmc_v7_0.h" +#include "cik_ih.h" +#include "dce_v8_0.h" +#include "gfx_v7_0.h" +#include "cik_sdma.h" +#include "uvd_v4_2.h" +#include "vce_v2_0.h" +#include "cik_dpm.h" + +#include "uvd/uvd_4_2_d.h" + +#include "smu/smu_7_0_1_d.h" +#include "smu/smu_7_0_1_sh_mask.h" + +#include "dce/dce_8_0_d.h" +#include "dce/dce_8_0_sh_mask.h" + +#include "bif/bif_4_1_d.h" +#include "bif/bif_4_1_sh_mask.h" + +#include "gca/gfx_7_2_d.h" +#include "gca/gfx_7_2_enum.h" +#include "gca/gfx_7_2_sh_mask.h" + +#include "gmc/gmc_7_1_d.h" +#include "gmc/gmc_7_1_sh_mask.h" + +#include "oss/oss_2_0_d.h" +#include "oss/oss_2_0_sh_mask.h" + +/* + * Indirect registers accessor + */ +static u32 cik_pcie_rreg(struct amdgpu_device *adev, u32 reg) +{ + unsigned long flags; + u32 r; + + spin_lock_irqsave(&adev->pcie_idx_lock, flags); + WREG32(mmPCIE_INDEX, reg); + (void)RREG32(mmPCIE_INDEX); + r = RREG32(mmPCIE_DATA); + spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); + return r; +} + +static void cik_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v) +{ + unsigned long flags; + + spin_lock_irqsave(&adev->pcie_idx_lock, flags); + WREG32(mmPCIE_INDEX, reg); + (void)RREG32(mmPCIE_INDEX); + WREG32(mmPCIE_DATA, v); + (void)RREG32(mmPCIE_DATA); + spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); +} + +static u32 cik_smc_rreg(struct amdgpu_device *adev, u32 reg) +{ + unsigned long flags; + u32 r; + + spin_lock_irqsave(&adev->smc_idx_lock, flags); + WREG32(mmSMC_IND_INDEX_0, (reg)); + r = RREG32(mmSMC_IND_DATA_0); + spin_unlock_irqrestore(&adev->smc_idx_lock, flags); + return r; +} + +static void cik_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v) +{ + unsigned long flags; + + spin_lock_irqsave(&adev->smc_idx_lock, flags); + WREG32(mmSMC_IND_INDEX_0, (reg)); + WREG32(mmSMC_IND_DATA_0, (v)); + spin_unlock_irqrestore(&adev->smc_idx_lock, flags); +} + +static u32 cik_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg) +{ + unsigned long flags; + u32 r; + + spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags); + WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff)); + r = RREG32(mmUVD_CTX_DATA); + spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags); + return r; +} + +static void cik_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v) +{ + unsigned long flags; + + spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags); + WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff)); + WREG32(mmUVD_CTX_DATA, (v)); + spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags); +} + +static u32 cik_didt_rreg(struct amdgpu_device *adev, u32 reg) +{ + unsigned long flags; + u32 r; + + spin_lock_irqsave(&adev->didt_idx_lock, flags); + WREG32(mmDIDT_IND_INDEX, (reg)); + r = RREG32(mmDIDT_IND_DATA); + spin_unlock_irqrestore(&adev->didt_idx_lock, flags); + return r; +} + +static void cik_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v) +{ + unsigned long flags; + + spin_lock_irqsave(&adev->didt_idx_lock, flags); + WREG32(mmDIDT_IND_INDEX, (reg)); + WREG32(mmDIDT_IND_DATA, (v)); + 
spin_unlock_irqrestore(&adev->didt_idx_lock, flags); +} + +static const u32 bonaire_golden_spm_registers[] = +{ + 0xc200, 0xe0ffffff, 0xe0000000 +}; + +static const u32 bonaire_golden_common_registers[] = +{ + 0x31dc, 0xffffffff, 0x00000800, + 0x31dd, 0xffffffff, 0x00000800, + 0x31e6, 0xffffffff, 0x00007fbf, + 0x31e7, 0xffffffff, 0x00007faf +}; + +static const u32 bonaire_golden_registers[] = +{ + 0xcd5, 0x00000333, 0x00000333, + 0xcd4, 0x000c0fc0, 0x00040200, + 0x2684, 0x00010000, 0x00058208, + 0xf000, 0xffff1fff, 0x00140000, + 0xf080, 0xfdfc0fff, 0x00000100, + 0xf08d, 0x40000000, 0x40000200, + 0x260c, 0xffffffff, 0x00000000, + 0x260d, 0xf00fffff, 0x00000400, + 0x260e, 0x0002021c, 0x00020200, + 0x31e, 0x00000080, 0x00000000, + 0x16ec, 0x000000f0, 0x00000070, + 0x16f0, 0xf0311fff, 0x80300000, + 0x263e, 0x73773777, 0x12010001, + 0xd43, 0x00810000, 0x408af000, + 0x1c0c, 0x31000111, 0x00000011, + 0xbd2, 0x73773777, 0x12010001, + 0x883, 0x00007fb6, 0x0021a1b1, + 0x884, 0x00007fb6, 0x002021b1, + 0x860, 0x00007fb6, 0x00002191, + 0x886, 0x00007fb6, 0x002121b1, + 0x887, 0x00007fb6, 0x002021b1, + 0x877, 0x00007fb6, 0x00002191, + 0x878, 0x00007fb6, 0x00002191, + 0xd8a, 0x0000003f, 0x0000000a, + 0xd8b, 0x0000003f, 0x0000000a, + 0xab9, 0x00073ffe, 0x000022a2, + 0x903, 0x000007ff, 0x00000000, + 0x2285, 0xf000003f, 0x00000007, + 0x22fc, 0x00002001, 0x00000001, + 0x22c9, 0xffffffff, 0x00ffffff, + 0xc281, 0x0000ff0f, 0x00000000, + 0xa293, 0x07ffffff, 0x06000000, + 0x136, 0x00000fff, 0x00000100, + 0xf9e, 0x00000001, 0x00000002, + 0x2440, 0x03000000, 0x0362c688, + 0x2300, 0x000000ff, 0x00000001, + 0x390, 0x00001fff, 0x00001fff, + 0x2418, 0x0000007f, 0x00000020, + 0x2542, 0x00010000, 0x00010000, + 0x2b05, 0x000003ff, 0x000000f3, + 0x2b03, 0xffffffff, 0x00001032 +}; + +static const u32 bonaire_mgcg_cgcg_init[] = +{ + 0x3108, 0xffffffff, 0xfffffffc, + 0xc200, 0xffffffff, 0xe0000000, + 0xf0a8, 0xffffffff, 0x00000100, + 0xf082, 0xffffffff, 0x00000100, + 0xf0b0, 0xffffffff, 0xc0000100, + 0xf0b2, 0xffffffff, 0xc0000100, + 0xf0b1, 0xffffffff, 0xc0000100, + 0x1579, 0xffffffff, 0x00600100, + 0xf0a0, 0xffffffff, 0x00000100, + 0xf085, 0xffffffff, 0x06000100, + 0xf088, 0xffffffff, 0x00000100, + 0xf086, 0xffffffff, 0x06000100, + 0xf081, 0xffffffff, 0x00000100, + 0xf0b8, 0xffffffff, 0x00000100, + 0xf089, 0xffffffff, 0x00000100, + 0xf080, 0xffffffff, 0x00000100, + 0xf08c, 0xffffffff, 0x00000100, + 0xf08d, 0xffffffff, 0x00000100, + 0xf094, 0xffffffff, 0x00000100, + 0xf095, 0xffffffff, 0x00000100, + 0xf096, 0xffffffff, 0x00000100, + 0xf097, 0xffffffff, 0x00000100, + 0xf098, 0xffffffff, 0x00000100, + 0xf09f, 0xffffffff, 0x00000100, + 0xf09e, 0xffffffff, 0x00000100, + 0xf084, 0xffffffff, 0x06000100, + 0xf0a4, 0xffffffff, 0x00000100, + 0xf09d, 0xffffffff, 0x00000100, + 0xf0ad, 0xffffffff, 0x00000100, + 0xf0ac, 0xffffffff, 0x00000100, + 0xf09c, 0xffffffff, 0x00000100, + 0xc200, 0xffffffff, 0xe0000000, + 0xf008, 0xffffffff, 0x00010000, + 0xf009, 0xffffffff, 0x00030002, + 0xf00a, 0xffffffff, 0x00040007, + 0xf00b, 0xffffffff, 0x00060005, + 0xf00c, 0xffffffff, 0x00090008, + 0xf00d, 0xffffffff, 0x00010000, + 0xf00e, 0xffffffff, 0x00030002, + 0xf00f, 0xffffffff, 0x00040007, + 0xf010, 0xffffffff, 0x00060005, + 0xf011, 0xffffffff, 0x00090008, + 0xf012, 0xffffffff, 0x00010000, + 0xf013, 0xffffffff, 0x00030002, + 0xf014, 0xffffffff, 0x00040007, + 0xf015, 0xffffffff, 0x00060005, + 0xf016, 0xffffffff, 0x00090008, + 0xf017, 0xffffffff, 0x00010000, + 0xf018, 0xffffffff, 0x00030002, + 0xf019, 0xffffffff, 0x00040007, + 0xf01a, 0xffffffff, 
0x00060005, + 0xf01b, 0xffffffff, 0x00090008, + 0xf01c, 0xffffffff, 0x00010000, + 0xf01d, 0xffffffff, 0x00030002, + 0xf01e, 0xffffffff, 0x00040007, + 0xf01f, 0xffffffff, 0x00060005, + 0xf020, 0xffffffff, 0x00090008, + 0xf021, 0xffffffff, 0x00010000, + 0xf022, 0xffffffff, 0x00030002, + 0xf023, 0xffffffff, 0x00040007, + 0xf024, 0xffffffff, 0x00060005, + 0xf025, 0xffffffff, 0x00090008, + 0xf026, 0xffffffff, 0x00010000, + 0xf027, 0xffffffff, 0x00030002, + 0xf028, 0xffffffff, 0x00040007, + 0xf029, 0xffffffff, 0x00060005, + 0xf02a, 0xffffffff, 0x00090008, + 0xf000, 0xffffffff, 0x96e00200, + 0x21c2, 0xffffffff, 0x00900100, + 0x3109, 0xffffffff, 0x0020003f, + 0xe, 0xffffffff, 0x0140001c, + 0xf, 0x000f0000, 0x000f0000, + 0x88, 0xffffffff, 0xc060000c, + 0x89, 0xc0000fff, 0x00000100, + 0x3e4, 0xffffffff, 0x00000100, + 0x3e6, 0x00000101, 0x00000000, + 0x82a, 0xffffffff, 0x00000104, + 0x1579, 0xff000fff, 0x00000100, + 0xc33, 0xc0000fff, 0x00000104, + 0x3079, 0x00000001, 0x00000001, + 0x3403, 0xff000ff0, 0x00000100, + 0x3603, 0xff000ff0, 0x00000100 +}; + +static const u32 spectre_golden_spm_registers[] = +{ + 0xc200, 0xe0ffffff, 0xe0000000 +}; + +static const u32 spectre_golden_common_registers[] = +{ + 0x31dc, 0xffffffff, 0x00000800, + 0x31dd, 0xffffffff, 0x00000800, + 0x31e6, 0xffffffff, 0x00007fbf, + 0x31e7, 0xffffffff, 0x00007faf +}; + +static const u32 spectre_golden_registers[] = +{ + 0xf000, 0xffff1fff, 0x96940200, + 0xf003, 0xffff0001, 0xff000000, + 0xf080, 0xfffc0fff, 0x00000100, + 0x1bb6, 0x00010101, 0x00010000, + 0x260d, 0xf00fffff, 0x00000400, + 0x260e, 0xfffffffc, 0x00020200, + 0x16ec, 0x000000f0, 0x00000070, + 0x16f0, 0xf0311fff, 0x80300000, + 0x263e, 0x73773777, 0x12010001, + 0x26df, 0x00ff0000, 0x00fc0000, + 0xbd2, 0x73773777, 0x12010001, + 0x2285, 0xf000003f, 0x00000007, + 0x22c9, 0xffffffff, 0x00ffffff, + 0xa0d4, 0x3f3f3fff, 0x00000082, + 0xa0d5, 0x0000003f, 0x00000000, + 0xf9e, 0x00000001, 0x00000002, + 0x244f, 0xffff03df, 0x00000004, + 0x31da, 0x00000008, 0x00000008, + 0x2300, 0x000008ff, 0x00000800, + 0x2542, 0x00010000, 0x00010000, + 0x2b03, 0xffffffff, 0x54763210, + 0x853e, 0x01ff01ff, 0x00000002, + 0x8526, 0x007ff800, 0x00200000, + 0x8057, 0xffffffff, 0x00000f40, + 0xc24d, 0xffffffff, 0x00000001 +}; + +static const u32 spectre_mgcg_cgcg_init[] = +{ + 0x3108, 0xffffffff, 0xfffffffc, + 0xc200, 0xffffffff, 0xe0000000, + 0xf0a8, 0xffffffff, 0x00000100, + 0xf082, 0xffffffff, 0x00000100, + 0xf0b0, 0xffffffff, 0x00000100, + 0xf0b2, 0xffffffff, 0x00000100, + 0xf0b1, 0xffffffff, 0x00000100, + 0x1579, 0xffffffff, 0x00600100, + 0xf0a0, 0xffffffff, 0x00000100, + 0xf085, 0xffffffff, 0x06000100, + 0xf088, 0xffffffff, 0x00000100, + 0xf086, 0xffffffff, 0x06000100, + 0xf081, 0xffffffff, 0x00000100, + 0xf0b8, 0xffffffff, 0x00000100, + 0xf089, 0xffffffff, 0x00000100, + 0xf080, 0xffffffff, 0x00000100, + 0xf08c, 0xffffffff, 0x00000100, + 0xf08d, 0xffffffff, 0x00000100, + 0xf094, 0xffffffff, 0x00000100, + 0xf095, 0xffffffff, 0x00000100, + 0xf096, 0xffffffff, 0x00000100, + 0xf097, 0xffffffff, 0x00000100, + 0xf098, 0xffffffff, 0x00000100, + 0xf09f, 0xffffffff, 0x00000100, + 0xf09e, 0xffffffff, 0x00000100, + 0xf084, 0xffffffff, 0x06000100, + 0xf0a4, 0xffffffff, 0x00000100, + 0xf09d, 0xffffffff, 0x00000100, + 0xf0ad, 0xffffffff, 0x00000100, + 0xf0ac, 0xffffffff, 0x00000100, + 0xf09c, 0xffffffff, 0x00000100, + 0xc200, 0xffffffff, 0xe0000000, + 0xf008, 0xffffffff, 0x00010000, + 0xf009, 0xffffffff, 0x00030002, + 0xf00a, 0xffffffff, 0x00040007, + 0xf00b, 0xffffffff, 0x00060005, + 0xf00c, 0xffffffff, 
0x00090008, + 0xf00d, 0xffffffff, 0x00010000, + 0xf00e, 0xffffffff, 0x00030002, + 0xf00f, 0xffffffff, 0x00040007, + 0xf010, 0xffffffff, 0x00060005, + 0xf011, 0xffffffff, 0x00090008, + 0xf012, 0xffffffff, 0x00010000, + 0xf013, 0xffffffff, 0x00030002, + 0xf014, 0xffffffff, 0x00040007, + 0xf015, 0xffffffff, 0x00060005, + 0xf016, 0xffffffff, 0x00090008, + 0xf017, 0xffffffff, 0x00010000, + 0xf018, 0xffffffff, 0x00030002, + 0xf019, 0xffffffff, 0x00040007, + 0xf01a, 0xffffffff, 0x00060005, + 0xf01b, 0xffffffff, 0x00090008, + 0xf01c, 0xffffffff, 0x00010000, + 0xf01d, 0xffffffff, 0x00030002, + 0xf01e, 0xffffffff, 0x00040007, + 0xf01f, 0xffffffff, 0x00060005, + 0xf020, 0xffffffff, 0x00090008, + 0xf021, 0xffffffff, 0x00010000, + 0xf022, 0xffffffff, 0x00030002, + 0xf023, 0xffffffff, 0x00040007, + 0xf024, 0xffffffff, 0x00060005, + 0xf025, 0xffffffff, 0x00090008, + 0xf026, 0xffffffff, 0x00010000, + 0xf027, 0xffffffff, 0x00030002, + 0xf028, 0xffffffff, 0x00040007, + 0xf029, 0xffffffff, 0x00060005, + 0xf02a, 0xffffffff, 0x00090008, + 0xf02b, 0xffffffff, 0x00010000, + 0xf02c, 0xffffffff, 0x00030002, + 0xf02d, 0xffffffff, 0x00040007, + 0xf02e, 0xffffffff, 0x00060005, + 0xf02f, 0xffffffff, 0x00090008, + 0xf000, 0xffffffff, 0x96e00200, + 0x21c2, 0xffffffff, 0x00900100, + 0x3109, 0xffffffff, 0x0020003f, + 0xe, 0xffffffff, 0x0140001c, + 0xf, 0x000f0000, 0x000f0000, + 0x88, 0xffffffff, 0xc060000c, + 0x89, 0xc0000fff, 0x00000100, + 0x3e4, 0xffffffff, 0x00000100, + 0x3e6, 0x00000101, 0x00000000, + 0x82a, 0xffffffff, 0x00000104, + 0x1579, 0xff000fff, 0x00000100, + 0xc33, 0xc0000fff, 0x00000104, + 0x3079, 0x00000001, 0x00000001, + 0x3403, 0xff000ff0, 0x00000100, + 0x3603, 0xff000ff0, 0x00000100 +}; + +static const u32 kalindi_golden_spm_registers[] = +{ + 0xc200, 0xe0ffffff, 0xe0000000 +}; + +static const u32 kalindi_golden_common_registers[] = +{ + 0x31dc, 0xffffffff, 0x00000800, + 0x31dd, 0xffffffff, 0x00000800, + 0x31e6, 0xffffffff, 0x00007fbf, + 0x31e7, 0xffffffff, 0x00007faf +}; + +static const u32 kalindi_golden_registers[] = +{ + 0xf000, 0xffffdfff, 0x6e944040, + 0x1579, 0xff607fff, 0xfc000100, + 0xf088, 0xff000fff, 0x00000100, + 0xf089, 0xff000fff, 0x00000100, + 0xf080, 0xfffc0fff, 0x00000100, + 0x1bb6, 0x00010101, 0x00010000, + 0x260c, 0xffffffff, 0x00000000, + 0x260d, 0xf00fffff, 0x00000400, + 0x16ec, 0x000000f0, 0x00000070, + 0x16f0, 0xf0311fff, 0x80300000, + 0x263e, 0x73773777, 0x12010001, + 0x263f, 0xffffffff, 0x00000010, + 0x26df, 0x00ff0000, 0x00fc0000, + 0x200c, 0x00001f0f, 0x0000100a, + 0xbd2, 0x73773777, 0x12010001, + 0x902, 0x000fffff, 0x000c007f, + 0x2285, 0xf000003f, 0x00000007, + 0x22c9, 0x3fff3fff, 0x00ffcfff, + 0xc281, 0x0000ff0f, 0x00000000, + 0xa293, 0x07ffffff, 0x06000000, + 0x136, 0x00000fff, 0x00000100, + 0xf9e, 0x00000001, 0x00000002, + 0x31da, 0x00000008, 0x00000008, + 0x2300, 0x000000ff, 0x00000003, + 0x853e, 0x01ff01ff, 0x00000002, + 0x8526, 0x007ff800, 0x00200000, + 0x8057, 0xffffffff, 0x00000f40, + 0x2231, 0x001f3ae3, 0x00000082, + 0x2235, 0x0000001f, 0x00000010, + 0xc24d, 0xffffffff, 0x00000000 +}; + +static const u32 kalindi_mgcg_cgcg_init[] = +{ + 0x3108, 0xffffffff, 0xfffffffc, + 0xc200, 0xffffffff, 0xe0000000, + 0xf0a8, 0xffffffff, 0x00000100, + 0xf082, 0xffffffff, 0x00000100, + 0xf0b0, 0xffffffff, 0x00000100, + 0xf0b2, 0xffffffff, 0x00000100, + 0xf0b1, 0xffffffff, 0x00000100, + 0x1579, 0xffffffff, 0x00600100, + 0xf0a0, 0xffffffff, 0x00000100, + 0xf085, 0xffffffff, 0x06000100, + 0xf088, 0xffffffff, 0x00000100, + 0xf086, 0xffffffff, 0x06000100, + 0xf081, 0xffffffff, 0x00000100, 
+ 0xf0b8, 0xffffffff, 0x00000100, + 0xf089, 0xffffffff, 0x00000100, + 0xf080, 0xffffffff, 0x00000100, + 0xf08c, 0xffffffff, 0x00000100, + 0xf08d, 0xffffffff, 0x00000100, + 0xf094, 0xffffffff, 0x00000100, + 0xf095, 0xffffffff, 0x00000100, + 0xf096, 0xffffffff, 0x00000100, + 0xf097, 0xffffffff, 0x00000100, + 0xf098, 0xffffffff, 0x00000100, + 0xf09f, 0xffffffff, 0x00000100, + 0xf09e, 0xffffffff, 0x00000100, + 0xf084, 0xffffffff, 0x06000100, + 0xf0a4, 0xffffffff, 0x00000100, + 0xf09d, 0xffffffff, 0x00000100, + 0xf0ad, 0xffffffff, 0x00000100, + 0xf0ac, 0xffffffff, 0x00000100, + 0xf09c, 0xffffffff, 0x00000100, + 0xc200, 0xffffffff, 0xe0000000, + 0xf008, 0xffffffff, 0x00010000, + 0xf009, 0xffffffff, 0x00030002, + 0xf00a, 0xffffffff, 0x00040007, + 0xf00b, 0xffffffff, 0x00060005, + 0xf00c, 0xffffffff, 0x00090008, + 0xf00d, 0xffffffff, 0x00010000, + 0xf00e, 0xffffffff, 0x00030002, + 0xf00f, 0xffffffff, 0x00040007, + 0xf010, 0xffffffff, 0x00060005, + 0xf011, 0xffffffff, 0x00090008, + 0xf000, 0xffffffff, 0x96e00200, + 0x21c2, 0xffffffff, 0x00900100, + 0x3109, 0xffffffff, 0x0020003f, + 0xe, 0xffffffff, 0x0140001c, + 0xf, 0x000f0000, 0x000f0000, + 0x88, 0xffffffff, 0xc060000c, + 0x89, 0xc0000fff, 0x00000100, + 0x82a, 0xffffffff, 0x00000104, + 0x1579, 0xff000fff, 0x00000100, + 0xc33, 0xc0000fff, 0x00000104, + 0x3079, 0x00000001, 0x00000001, + 0x3403, 0xff000ff0, 0x00000100, + 0x3603, 0xff000ff0, 0x00000100 +}; + +static const u32 hawaii_golden_spm_registers[] = +{ + 0xc200, 0xe0ffffff, 0xe0000000 +}; + +static const u32 hawaii_golden_common_registers[] = +{ + 0xc200, 0xffffffff, 0xe0000000, + 0xa0d4, 0xffffffff, 0x3a00161a, + 0xa0d5, 0xffffffff, 0x0000002e, + 0x2684, 0xffffffff, 0x00018208, + 0x263e, 0xffffffff, 0x12011003 +}; + +static const u32 hawaii_golden_registers[] = +{ + 0xcd5, 0x00000333, 0x00000333, + 0x2684, 0x00010000, 0x00058208, + 0x260c, 0xffffffff, 0x00000000, + 0x260d, 0xf00fffff, 0x00000400, + 0x260e, 0x0002021c, 0x00020200, + 0x31e, 0x00000080, 0x00000000, + 0x16ec, 0x000000f0, 0x00000070, + 0x16f0, 0xf0311fff, 0x80300000, + 0xd43, 0x00810000, 0x408af000, + 0x1c0c, 0x31000111, 0x00000011, + 0xbd2, 0x73773777, 0x12010001, + 0x848, 0x0000007f, 0x0000001b, + 0x877, 0x00007fb6, 0x00002191, + 0xd8a, 0x0000003f, 0x0000000a, + 0xd8b, 0x0000003f, 0x0000000a, + 0xab9, 0x00073ffe, 0x000022a2, + 0x903, 0x000007ff, 0x00000000, + 0x22fc, 0x00002001, 0x00000001, + 0x22c9, 0xffffffff, 0x00ffffff, + 0xc281, 0x0000ff0f, 0x00000000, + 0xa293, 0x07ffffff, 0x06000000, + 0xf9e, 0x00000001, 0x00000002, + 0x31da, 0x00000008, 0x00000008, + 0x31dc, 0x00000f00, 0x00000800, + 0x31dd, 0x00000f00, 0x00000800, + 0x31e6, 0x00ffffff, 0x00ff7fbf, + 0x31e7, 0x00ffffff, 0x00ff7faf, + 0x2300, 0x000000ff, 0x00000800, + 0x390, 0x00001fff, 0x00001fff, + 0x2418, 0x0000007f, 0x00000020, + 0x2542, 0x00010000, 0x00010000, + 0x2b80, 0x00100000, 0x000ff07c, + 0x2b05, 0x000003ff, 0x0000000f, + 0x2b04, 0xffffffff, 0x7564fdec, + 0x2b03, 0xffffffff, 0x3120b9a8, + 0x2b02, 0x20000000, 0x0f9c0000 +}; + +static const u32 hawaii_mgcg_cgcg_init[] = +{ + 0x3108, 0xffffffff, 0xfffffffd, + 0xc200, 0xffffffff, 0xe0000000, + 0xf0a8, 0xffffffff, 0x00000100, + 0xf082, 0xffffffff, 0x00000100, + 0xf0b0, 0xffffffff, 0x00000100, + 0xf0b2, 0xffffffff, 0x00000100, + 0xf0b1, 0xffffffff, 0x00000100, + 0x1579, 0xffffffff, 0x00200100, + 0xf0a0, 0xffffffff, 0x00000100, + 0xf085, 0xffffffff, 0x06000100, + 0xf088, 0xffffffff, 0x00000100, + 0xf086, 0xffffffff, 0x06000100, + 0xf081, 0xffffffff, 0x00000100, + 0xf0b8, 0xffffffff, 0x00000100, + 0xf089, 0xffffffff, 
0x00000100, + 0xf080, 0xffffffff, 0x00000100, + 0xf08c, 0xffffffff, 0x00000100, + 0xf08d, 0xffffffff, 0x00000100, + 0xf094, 0xffffffff, 0x00000100, + 0xf095, 0xffffffff, 0x00000100, + 0xf096, 0xffffffff, 0x00000100, + 0xf097, 0xffffffff, 0x00000100, + 0xf098, 0xffffffff, 0x00000100, + 0xf09f, 0xffffffff, 0x00000100, + 0xf09e, 0xffffffff, 0x00000100, + 0xf084, 0xffffffff, 0x06000100, + 0xf0a4, 0xffffffff, 0x00000100, + 0xf09d, 0xffffffff, 0x00000100, + 0xf0ad, 0xffffffff, 0x00000100, + 0xf0ac, 0xffffffff, 0x00000100, + 0xf09c, 0xffffffff, 0x00000100, + 0xc200, 0xffffffff, 0xe0000000, + 0xf008, 0xffffffff, 0x00010000, + 0xf009, 0xffffffff, 0x00030002, + 0xf00a, 0xffffffff, 0x00040007, + 0xf00b, 0xffffffff, 0x00060005, + 0xf00c, 0xffffffff, 0x00090008, + 0xf00d, 0xffffffff, 0x00010000, + 0xf00e, 0xffffffff, 0x00030002, + 0xf00f, 0xffffffff, 0x00040007, + 0xf010, 0xffffffff, 0x00060005, + 0xf011, 0xffffffff, 0x00090008, + 0xf012, 0xffffffff, 0x00010000, + 0xf013, 0xffffffff, 0x00030002, + 0xf014, 0xffffffff, 0x00040007, + 0xf015, 0xffffffff, 0x00060005, + 0xf016, 0xffffffff, 0x00090008, + 0xf017, 0xffffffff, 0x00010000, + 0xf018, 0xffffffff, 0x00030002, + 0xf019, 0xffffffff, 0x00040007, + 0xf01a, 0xffffffff, 0x00060005, + 0xf01b, 0xffffffff, 0x00090008, + 0xf01c, 0xffffffff, 0x00010000, + 0xf01d, 0xffffffff, 0x00030002, + 0xf01e, 0xffffffff, 0x00040007, + 0xf01f, 0xffffffff, 0x00060005, + 0xf020, 0xffffffff, 0x00090008, + 0xf021, 0xffffffff, 0x00010000, + 0xf022, 0xffffffff, 0x00030002, + 0xf023, 0xffffffff, 0x00040007, + 0xf024, 0xffffffff, 0x00060005, + 0xf025, 0xffffffff, 0x00090008, + 0xf026, 0xffffffff, 0x00010000, + 0xf027, 0xffffffff, 0x00030002, + 0xf028, 0xffffffff, 0x00040007, + 0xf029, 0xffffffff, 0x00060005, + 0xf02a, 0xffffffff, 0x00090008, + 0xf02b, 0xffffffff, 0x00010000, + 0xf02c, 0xffffffff, 0x00030002, + 0xf02d, 0xffffffff, 0x00040007, + 0xf02e, 0xffffffff, 0x00060005, + 0xf02f, 0xffffffff, 0x00090008, + 0xf030, 0xffffffff, 0x00010000, + 0xf031, 0xffffffff, 0x00030002, + 0xf032, 0xffffffff, 0x00040007, + 0xf033, 0xffffffff, 0x00060005, + 0xf034, 0xffffffff, 0x00090008, + 0xf035, 0xffffffff, 0x00010000, + 0xf036, 0xffffffff, 0x00030002, + 0xf037, 0xffffffff, 0x00040007, + 0xf038, 0xffffffff, 0x00060005, + 0xf039, 0xffffffff, 0x00090008, + 0xf03a, 0xffffffff, 0x00010000, + 0xf03b, 0xffffffff, 0x00030002, + 0xf03c, 0xffffffff, 0x00040007, + 0xf03d, 0xffffffff, 0x00060005, + 0xf03e, 0xffffffff, 0x00090008, + 0x30c6, 0xffffffff, 0x00020200, + 0xcd4, 0xffffffff, 0x00000200, + 0x570, 0xffffffff, 0x00000400, + 0x157a, 0xffffffff, 0x00000000, + 0xbd4, 0xffffffff, 0x00000902, + 0xf000, 0xffffffff, 0x96940200, + 0x21c2, 0xffffffff, 0x00900100, + 0x3109, 0xffffffff, 0x0020003f, + 0xe, 0xffffffff, 0x0140001c, + 0xf, 0x000f0000, 0x000f0000, + 0x88, 0xffffffff, 0xc060000c, + 0x89, 0xc0000fff, 0x00000100, + 0x3e4, 0xffffffff, 0x00000100, + 0x3e6, 0x00000101, 0x00000000, + 0x82a, 0xffffffff, 0x00000104, + 0x1579, 0xff000fff, 0x00000100, + 0xc33, 0xc0000fff, 0x00000104, + 0x3079, 0x00000001, 0x00000001, + 0x3403, 0xff000ff0, 0x00000100, + 0x3603, 0xff000ff0, 0x00000100 +}; + +static const u32 godavari_golden_registers[] = +{ + 0x1579, 0xff607fff, 0xfc000100, + 0x1bb6, 0x00010101, 0x00010000, + 0x260c, 0xffffffff, 0x00000000, + 0x260c0, 0xf00fffff, 0x00000400, + 0x184c, 0xffffffff, 0x00010000, + 0x16ec, 0x000000f0, 0x00000070, + 0x16f0, 0xf0311fff, 0x80300000, + 0x263e, 0x73773777, 0x12010001, + 0x263f, 0xffffffff, 0x00000010, + 0x200c, 0x00001f0f, 0x0000100a, + 0xbd2, 0x73773777, 0x12010001, + 
0x902, 0x000fffff, 0x000c007f, + 0x2285, 0xf000003f, 0x00000007, + 0x22c9, 0xffffffff, 0x00ff0fff, + 0xc281, 0x0000ff0f, 0x00000000, + 0xa293, 0x07ffffff, 0x06000000, + 0x136, 0x00000fff, 0x00000100, + 0x3405, 0x00010000, 0x00810001, + 0x3605, 0x00010000, 0x00810001, + 0xf9e, 0x00000001, 0x00000002, + 0x31da, 0x00000008, 0x00000008, + 0x31dc, 0x00000f00, 0x00000800, + 0x31dd, 0x00000f00, 0x00000800, + 0x31e6, 0x00ffffff, 0x00ff7fbf, + 0x31e7, 0x00ffffff, 0x00ff7faf, + 0x2300, 0x000000ff, 0x00000001, + 0x853e, 0x01ff01ff, 0x00000002, + 0x8526, 0x007ff800, 0x00200000, + 0x8057, 0xffffffff, 0x00000f40, + 0x2231, 0x001f3ae3, 0x00000082, + 0x2235, 0x0000001f, 0x00000010, + 0xc24d, 0xffffffff, 0x00000000 +}; + +static void cik_init_golden_registers(struct amdgpu_device *adev) +{ + /* Some of the registers might be dependent on GRBM_GFX_INDEX */ + mutex_lock(&adev->grbm_idx_mutex); + + switch (adev->asic_type) { + case CHIP_BONAIRE: + amdgpu_program_register_sequence(adev, + bonaire_mgcg_cgcg_init, + (const u32)ARRAY_SIZE(bonaire_mgcg_cgcg_init)); + amdgpu_program_register_sequence(adev, + bonaire_golden_registers, + (const u32)ARRAY_SIZE(bonaire_golden_registers)); + amdgpu_program_register_sequence(adev, + bonaire_golden_common_registers, + (const u32)ARRAY_SIZE(bonaire_golden_common_registers)); + amdgpu_program_register_sequence(adev, + bonaire_golden_spm_registers, + (const u32)ARRAY_SIZE(bonaire_golden_spm_registers)); + break; + case CHIP_KABINI: + amdgpu_program_register_sequence(adev, + kalindi_mgcg_cgcg_init, + (const u32)ARRAY_SIZE(kalindi_mgcg_cgcg_init)); + amdgpu_program_register_sequence(adev, + kalindi_golden_registers, + (const u32)ARRAY_SIZE(kalindi_golden_registers)); + amdgpu_program_register_sequence(adev, + kalindi_golden_common_registers, + (const u32)ARRAY_SIZE(kalindi_golden_common_registers)); + amdgpu_program_register_sequence(adev, + kalindi_golden_spm_registers, + (const u32)ARRAY_SIZE(kalindi_golden_spm_registers)); + break; + case CHIP_MULLINS: + amdgpu_program_register_sequence(adev, + kalindi_mgcg_cgcg_init, + (const u32)ARRAY_SIZE(kalindi_mgcg_cgcg_init)); + amdgpu_program_register_sequence(adev, + godavari_golden_registers, + (const u32)ARRAY_SIZE(godavari_golden_registers)); + amdgpu_program_register_sequence(adev, + kalindi_golden_common_registers, + (const u32)ARRAY_SIZE(kalindi_golden_common_registers)); + amdgpu_program_register_sequence(adev, + kalindi_golden_spm_registers, + (const u32)ARRAY_SIZE(kalindi_golden_spm_registers)); + break; + case CHIP_KAVERI: + amdgpu_program_register_sequence(adev, + spectre_mgcg_cgcg_init, + (const u32)ARRAY_SIZE(spectre_mgcg_cgcg_init)); + amdgpu_program_register_sequence(adev, + spectre_golden_registers, + (const u32)ARRAY_SIZE(spectre_golden_registers)); + amdgpu_program_register_sequence(adev, + spectre_golden_common_registers, + (const u32)ARRAY_SIZE(spectre_golden_common_registers)); + amdgpu_program_register_sequence(adev, + spectre_golden_spm_registers, + (const u32)ARRAY_SIZE(spectre_golden_spm_registers)); + break; + case CHIP_HAWAII: + amdgpu_program_register_sequence(adev, + hawaii_mgcg_cgcg_init, + (const u32)ARRAY_SIZE(hawaii_mgcg_cgcg_init)); + amdgpu_program_register_sequence(adev, + hawaii_golden_registers, + (const u32)ARRAY_SIZE(hawaii_golden_registers)); + amdgpu_program_register_sequence(adev, + hawaii_golden_common_registers, + (const u32)ARRAY_SIZE(hawaii_golden_common_registers)); + amdgpu_program_register_sequence(adev, + hawaii_golden_spm_registers, + (const 
u32)ARRAY_SIZE(hawaii_golden_spm_registers)); + break; + default: + break; + } + mutex_unlock(&adev->grbm_idx_mutex); +} + +/** + * cik_get_xclk - get the xclk + * + * @adev: amdgpu_device pointer + * + * Returns the reference clock used by the gfx engine + * (CIK). + */ +static u32 cik_get_xclk(struct amdgpu_device *adev) +{ + u32 reference_clock = adev->clock.spll.reference_freq; + + if (adev->flags & AMDGPU_IS_APU) { + if (RREG32_SMC(ixGENERAL_PWRMGT) & GENERAL_PWRMGT__GPU_COUNTER_CLK_MASK) + return reference_clock / 2; + } else { + if (RREG32_SMC(ixCG_CLKPIN_CNTL) & CG_CLKPIN_CNTL__XTALIN_DIVIDE_MASK) + return reference_clock / 4; + } + return reference_clock; +} + +/** + * cik_srbm_select - select specific register instances + * + * @adev: amdgpu_device pointer + * @me: selected ME (micro engine) + * @pipe: pipe + * @queue: queue + * @vmid: VMID + * + * Switches the currently active registers instances. Some + * registers are instanced per VMID, others are instanced per + * me/pipe/queue combination. + */ +void cik_srbm_select(struct amdgpu_device *adev, + u32 me, u32 pipe, u32 queue, u32 vmid) +{ + u32 srbm_gfx_cntl = + (((pipe << SRBM_GFX_CNTL__PIPEID__SHIFT) & SRBM_GFX_CNTL__PIPEID_MASK)| + ((me << SRBM_GFX_CNTL__MEID__SHIFT) & SRBM_GFX_CNTL__MEID_MASK)| + ((vmid << SRBM_GFX_CNTL__VMID__SHIFT) & SRBM_GFX_CNTL__VMID_MASK)| + ((queue << SRBM_GFX_CNTL__QUEUEID__SHIFT) & SRBM_GFX_CNTL__QUEUEID_MASK)); + WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl); +} + +static void cik_vga_set_state(struct amdgpu_device *adev, bool state) +{ + uint32_t tmp; + + tmp = RREG32(mmCONFIG_CNTL); + if (state == false) + tmp |= CONFIG_CNTL__VGA_DIS_MASK; + else + tmp &= ~CONFIG_CNTL__VGA_DIS_MASK; + WREG32(mmCONFIG_CNTL, tmp); +} + +static bool cik_read_disabled_bios(struct amdgpu_device *adev) +{ + u32 bus_cntl; + u32 d1vga_control = 0; + u32 d2vga_control = 0; + u32 vga_render_control = 0; + u32 rom_cntl; + bool r; + + bus_cntl = RREG32(mmBUS_CNTL); + if (adev->mode_info.num_crtc) { + d1vga_control = RREG32(mmD1VGA_CONTROL); + d2vga_control = RREG32(mmD2VGA_CONTROL); + vga_render_control = RREG32(mmVGA_RENDER_CONTROL); + } + rom_cntl = RREG32_SMC(ixROM_CNTL); + + /* enable the rom */ + WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK)); + if (adev->mode_info.num_crtc) { + /* Disable VGA mode */ + WREG32(mmD1VGA_CONTROL, + (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK | + D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK))); + WREG32(mmD2VGA_CONTROL, + (d2vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK | + D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK))); + WREG32(mmVGA_RENDER_CONTROL, + (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK)); + } + WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK); + + r = amdgpu_read_bios(adev); + + /* restore regs */ + WREG32(mmBUS_CNTL, bus_cntl); + if (adev->mode_info.num_crtc) { + WREG32(mmD1VGA_CONTROL, d1vga_control); + WREG32(mmD2VGA_CONTROL, d2vga_control); + WREG32(mmVGA_RENDER_CONTROL, vga_render_control); + } + WREG32_SMC(ixROM_CNTL, rom_cntl); + return r; +} + +static struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = { + {mmGRBM_STATUS, false}, + {mmGB_ADDR_CONFIG, false}, + {mmMC_ARB_RAMCFG, false}, + {mmGB_TILE_MODE0, false}, + {mmGB_TILE_MODE1, false}, + {mmGB_TILE_MODE2, false}, + {mmGB_TILE_MODE3, false}, + {mmGB_TILE_MODE4, false}, + {mmGB_TILE_MODE5, false}, + {mmGB_TILE_MODE6, false}, + {mmGB_TILE_MODE7, false}, + {mmGB_TILE_MODE8, false}, + {mmGB_TILE_MODE9, false}, + {mmGB_TILE_MODE10, false}, + 
{mmGB_TILE_MODE11, false}, + {mmGB_TILE_MODE12, false}, + {mmGB_TILE_MODE13, false}, + {mmGB_TILE_MODE14, false}, + {mmGB_TILE_MODE15, false}, + {mmGB_TILE_MODE16, false}, + {mmGB_TILE_MODE17, false}, + {mmGB_TILE_MODE18, false}, + {mmGB_TILE_MODE19, false}, + {mmGB_TILE_MODE20, false}, + {mmGB_TILE_MODE21, false}, + {mmGB_TILE_MODE22, false}, + {mmGB_TILE_MODE23, false}, + {mmGB_TILE_MODE24, false}, + {mmGB_TILE_MODE25, false}, + {mmGB_TILE_MODE26, false}, + {mmGB_TILE_MODE27, false}, + {mmGB_TILE_MODE28, false}, + {mmGB_TILE_MODE29, false}, + {mmGB_TILE_MODE30, false}, + {mmGB_TILE_MODE31, false}, + {mmGB_MACROTILE_MODE0, false}, + {mmGB_MACROTILE_MODE1, false}, + {mmGB_MACROTILE_MODE2, false}, + {mmGB_MACROTILE_MODE3, false}, + {mmGB_MACROTILE_MODE4, false}, + {mmGB_MACROTILE_MODE5, false}, + {mmGB_MACROTILE_MODE6, false}, + {mmGB_MACROTILE_MODE7, false}, + {mmGB_MACROTILE_MODE8, false}, + {mmGB_MACROTILE_MODE9, false}, + {mmGB_MACROTILE_MODE10, false}, + {mmGB_MACROTILE_MODE11, false}, + {mmGB_MACROTILE_MODE12, false}, + {mmGB_MACROTILE_MODE13, false}, + {mmGB_MACROTILE_MODE14, false}, + {mmGB_MACROTILE_MODE15, false}, + {mmCC_RB_BACKEND_DISABLE, false, true}, + {mmGC_USER_RB_BACKEND_DISABLE, false, true}, + {mmGB_BACKEND_MAP, false, false}, + {mmPA_SC_RASTER_CONFIG, false, true}, + {mmPA_SC_RASTER_CONFIG_1, false, true}, +}; + +static uint32_t cik_read_indexed_register(struct amdgpu_device *adev, + u32 se_num, u32 sh_num, + u32 reg_offset) +{ + uint32_t val; + + mutex_lock(&adev->grbm_idx_mutex); + if (se_num != 0xffffffff || sh_num != 0xffffffff) + gfx_v7_0_select_se_sh(adev, se_num, sh_num); + + val = RREG32(reg_offset); + + if (se_num != 0xffffffff || sh_num != 0xffffffff) + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); + return val; +} + +static int cik_read_register(struct amdgpu_device *adev, u32 se_num, + u32 sh_num, u32 reg_offset, u32 *value) +{ + uint32_t i; + + *value = 0; + for (i = 0; i < ARRAY_SIZE(cik_allowed_read_registers); i++) { + if (reg_offset != cik_allowed_read_registers[i].reg_offset) + continue; + + if (!cik_allowed_read_registers[i].untouched) + *value = cik_allowed_read_registers[i].grbm_indexed ? 
+ cik_read_indexed_register(adev, se_num, + sh_num, reg_offset) : + RREG32(reg_offset); + return 0; + } + return -EINVAL; +} + +static void cik_print_gpu_status_regs(struct amdgpu_device *adev) +{ + dev_info(adev->dev, " GRBM_STATUS=0x%08X\n", + RREG32(mmGRBM_STATUS)); + dev_info(adev->dev, " GRBM_STATUS2=0x%08X\n", + RREG32(mmGRBM_STATUS2)); + dev_info(adev->dev, " GRBM_STATUS_SE0=0x%08X\n", + RREG32(mmGRBM_STATUS_SE0)); + dev_info(adev->dev, " GRBM_STATUS_SE1=0x%08X\n", + RREG32(mmGRBM_STATUS_SE1)); + dev_info(adev->dev, " GRBM_STATUS_SE2=0x%08X\n", + RREG32(mmGRBM_STATUS_SE2)); + dev_info(adev->dev, " GRBM_STATUS_SE3=0x%08X\n", + RREG32(mmGRBM_STATUS_SE3)); + dev_info(adev->dev, " SRBM_STATUS=0x%08X\n", + RREG32(mmSRBM_STATUS)); + dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", + RREG32(mmSRBM_STATUS2)); + dev_info(adev->dev, " SDMA0_STATUS_REG = 0x%08X\n", + RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET)); + dev_info(adev->dev, " SDMA1_STATUS_REG = 0x%08X\n", + RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET)); + dev_info(adev->dev, " CP_STAT = 0x%08x\n", RREG32(mmCP_STAT)); + dev_info(adev->dev, " CP_STALLED_STAT1 = 0x%08x\n", + RREG32(mmCP_STALLED_STAT1)); + dev_info(adev->dev, " CP_STALLED_STAT2 = 0x%08x\n", + RREG32(mmCP_STALLED_STAT2)); + dev_info(adev->dev, " CP_STALLED_STAT3 = 0x%08x\n", + RREG32(mmCP_STALLED_STAT3)); + dev_info(adev->dev, " CP_CPF_BUSY_STAT = 0x%08x\n", + RREG32(mmCP_CPF_BUSY_STAT)); + dev_info(adev->dev, " CP_CPF_STALLED_STAT1 = 0x%08x\n", + RREG32(mmCP_CPF_STALLED_STAT1)); + dev_info(adev->dev, " CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS)); + dev_info(adev->dev, " CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPC_BUSY_STAT)); + dev_info(adev->dev, " CP_CPC_STALLED_STAT1 = 0x%08x\n", + RREG32(mmCP_CPC_STALLED_STAT1)); + dev_info(adev->dev, " CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS)); +} + +/** + * cik_gpu_check_soft_reset - check which blocks are busy + * + * @adev: amdgpu_device pointer + * + * Check which blocks are busy and return the relevant reset + * mask to be used by cik_gpu_soft_reset(). + * Returns a mask of the blocks to be reset. 
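+ * For example, a busy graphics pipe reported in GRBM_STATUS contributes
+ * AMDGPU_RESET_GFX and a non-idle SDMA engine contributes AMDGPU_RESET_DMA
+ * or AMDGPU_RESET_DMA1, while a busy MC is treated as benign and masked
+ * back out of the result.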
+ */ +u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev) +{ + u32 reset_mask = 0; + u32 tmp; + + /* GRBM_STATUS */ + tmp = RREG32(mmGRBM_STATUS); + if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK | + GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK | + GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK | + GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK | + GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK | + GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) + reset_mask |= AMDGPU_RESET_GFX; + + if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) + reset_mask |= AMDGPU_RESET_CP; + + /* GRBM_STATUS2 */ + tmp = RREG32(mmGRBM_STATUS2); + if (tmp & GRBM_STATUS2__RLC_BUSY_MASK) + reset_mask |= AMDGPU_RESET_RLC; + + /* SDMA0_STATUS_REG */ + tmp = RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET); + if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK)) + reset_mask |= AMDGPU_RESET_DMA; + + /* SDMA1_STATUS_REG */ + tmp = RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET); + if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK)) + reset_mask |= AMDGPU_RESET_DMA1; + + /* SRBM_STATUS2 */ + tmp = RREG32(mmSRBM_STATUS2); + if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) + reset_mask |= AMDGPU_RESET_DMA; + + if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) + reset_mask |= AMDGPU_RESET_DMA1; + + /* SRBM_STATUS */ + tmp = RREG32(mmSRBM_STATUS); + + if (tmp & SRBM_STATUS__IH_BUSY_MASK) + reset_mask |= AMDGPU_RESET_IH; + + if (tmp & SRBM_STATUS__SEM_BUSY_MASK) + reset_mask |= AMDGPU_RESET_SEM; + + if (tmp & SRBM_STATUS__GRBM_RQ_PENDING_MASK) + reset_mask |= AMDGPU_RESET_GRBM; + + if (tmp & SRBM_STATUS__VMC_BUSY_MASK) + reset_mask |= AMDGPU_RESET_VMC; + + if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | + SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) + reset_mask |= AMDGPU_RESET_MC; + + if (amdgpu_display_is_display_hung(adev)) + reset_mask |= AMDGPU_RESET_DISPLAY; + + /* Skip MC reset as it's mostly likely not hung, just busy */ + if (reset_mask & AMDGPU_RESET_MC) { + DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask); + reset_mask &= ~AMDGPU_RESET_MC; + } + + return reset_mask; +} + +/** + * cik_gpu_soft_reset - soft reset GPU + * + * @adev: amdgpu_device pointer + * @reset_mask: mask of which blocks to reset + * + * Soft reset the blocks specified in @reset_mask. 
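+ * The CP/MEC front ends and any flagged SDMA engines are halted and the MC
+ * is stopped before the GRBM/SRBM soft reset bits are pulsed; the MC is then
+ * resumed and the status registers are dumped again.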
+ */ +static void cik_gpu_soft_reset(struct amdgpu_device *adev, u32 reset_mask) +{ + struct amdgpu_mode_mc_save save; + u32 grbm_soft_reset = 0, srbm_soft_reset = 0; + u32 tmp; + + if (reset_mask == 0) + return; + + dev_info(adev->dev, "GPU softreset: 0x%08X\n", reset_mask); + + cik_print_gpu_status_regs(adev); + dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", + RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR)); + dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", + RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS)); + + /* disable CG/PG */ + + /* stop the rlc */ + gfx_v7_0_rlc_stop(adev); + + /* Disable GFX parsing/prefetching */ + WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK); + + /* Disable MEC parsing/prefetching */ + WREG32(mmCP_MEC_CNTL, CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK); + + if (reset_mask & AMDGPU_RESET_DMA) { + /* sdma0 */ + tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET); + tmp |= SDMA0_F32_CNTL__HALT_MASK; + WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp); + } + if (reset_mask & AMDGPU_RESET_DMA1) { + /* sdma1 */ + tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET); + tmp |= SDMA0_F32_CNTL__HALT_MASK; + WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp); + } + + gmc_v7_0_mc_stop(adev, &save); + if (amdgpu_asic_wait_for_mc_idle(adev)) { + dev_warn(adev->dev, "Wait for MC idle timedout !\n"); + } + + if (reset_mask & (AMDGPU_RESET_GFX | AMDGPU_RESET_COMPUTE | AMDGPU_RESET_CP)) + grbm_soft_reset = GRBM_SOFT_RESET__SOFT_RESET_CP_MASK | + GRBM_SOFT_RESET__SOFT_RESET_GFX_MASK; + + if (reset_mask & AMDGPU_RESET_CP) { + grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK; + + srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK; + } + + if (reset_mask & AMDGPU_RESET_DMA) + srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK; + + if (reset_mask & AMDGPU_RESET_DMA1) + srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK; + + if (reset_mask & AMDGPU_RESET_DISPLAY) + srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK; + + if (reset_mask & AMDGPU_RESET_RLC) + grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK; + + if (reset_mask & AMDGPU_RESET_SEM) + srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SEM_MASK; + + if (reset_mask & AMDGPU_RESET_IH) + srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK; + + if (reset_mask & AMDGPU_RESET_GRBM) + srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK; + + if (reset_mask & AMDGPU_RESET_VMC) + srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_VMC_MASK; + + if (!(adev->flags & AMDGPU_IS_APU)) { + if (reset_mask & AMDGPU_RESET_MC) + srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_MC_MASK; + } + + if (grbm_soft_reset) { + tmp = RREG32(mmGRBM_SOFT_RESET); + tmp |= grbm_soft_reset; + dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32(mmGRBM_SOFT_RESET, tmp); + tmp = RREG32(mmGRBM_SOFT_RESET); + + udelay(50); + + tmp &= ~grbm_soft_reset; + WREG32(mmGRBM_SOFT_RESET, tmp); + tmp = RREG32(mmGRBM_SOFT_RESET); + } + + if (srbm_soft_reset) { + tmp = RREG32(mmSRBM_SOFT_RESET); + tmp |= srbm_soft_reset; + dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + udelay(50); + + tmp &= ~srbm_soft_reset; + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + } + + /* Wait a little for things to settle down */ + udelay(50); + + gmc_v7_0_mc_resume(adev, &save); + udelay(50); + + cik_print_gpu_status_regs(adev); +} 
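+
+/* On APU parts the GMCON state captured below is saved before a PCI config
+ * reset and restored afterwards via the PGFSM write sequence in
+ * kv_restore_regs_for_reset().
+ */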
+ +struct kv_reset_save_regs { + u32 gmcon_reng_execute; + u32 gmcon_misc; + u32 gmcon_misc3; +}; + +static void kv_save_regs_for_reset(struct amdgpu_device *adev, + struct kv_reset_save_regs *save) +{ + save->gmcon_reng_execute = RREG32(mmGMCON_RENG_EXECUTE); + save->gmcon_misc = RREG32(mmGMCON_MISC); + save->gmcon_misc3 = RREG32(mmGMCON_MISC3); + + WREG32(mmGMCON_RENG_EXECUTE, save->gmcon_reng_execute & + ~GMCON_RENG_EXECUTE__RENG_EXECUTE_ON_PWR_UP_MASK); + WREG32(mmGMCON_MISC, save->gmcon_misc & + ~(GMCON_MISC__RENG_EXECUTE_ON_REG_UPDATE_MASK | + GMCON_MISC__STCTRL_STUTTER_EN_MASK)); +} + +static void kv_restore_regs_for_reset(struct amdgpu_device *adev, + struct kv_reset_save_regs *save) +{ + int i; + + WREG32(mmGMCON_PGFSM_WRITE, 0); + WREG32(mmGMCON_PGFSM_CONFIG, 0x200010ff); + + for (i = 0; i < 5; i++) + WREG32(mmGMCON_PGFSM_WRITE, 0); + + WREG32(mmGMCON_PGFSM_WRITE, 0); + WREG32(mmGMCON_PGFSM_CONFIG, 0x300010ff); + + for (i = 0; i < 5; i++) + WREG32(mmGMCON_PGFSM_WRITE, 0); + + WREG32(mmGMCON_PGFSM_WRITE, 0x210000); + WREG32(mmGMCON_PGFSM_CONFIG, 0xa00010ff); + + for (i = 0; i < 5; i++) + WREG32(mmGMCON_PGFSM_WRITE, 0); + + WREG32(mmGMCON_PGFSM_WRITE, 0x21003); + WREG32(mmGMCON_PGFSM_CONFIG, 0xb00010ff); + + for (i = 0; i < 5; i++) + WREG32(mmGMCON_PGFSM_WRITE, 0); + + WREG32(mmGMCON_PGFSM_WRITE, 0x2b00); + WREG32(mmGMCON_PGFSM_CONFIG, 0xc00010ff); + + for (i = 0; i < 5; i++) + WREG32(mmGMCON_PGFSM_WRITE, 0); + + WREG32(mmGMCON_PGFSM_WRITE, 0); + WREG32(mmGMCON_PGFSM_CONFIG, 0xd00010ff); + + for (i = 0; i < 5; i++) + WREG32(mmGMCON_PGFSM_WRITE, 0); + + WREG32(mmGMCON_PGFSM_WRITE, 0x420000); + WREG32(mmGMCON_PGFSM_CONFIG, 0x100010ff); + + for (i = 0; i < 5; i++) + WREG32(mmGMCON_PGFSM_WRITE, 0); + + WREG32(mmGMCON_PGFSM_WRITE, 0x120202); + WREG32(mmGMCON_PGFSM_CONFIG, 0x500010ff); + + for (i = 0; i < 5; i++) + WREG32(mmGMCON_PGFSM_WRITE, 0); + + WREG32(mmGMCON_PGFSM_WRITE, 0x3e3e36); + WREG32(mmGMCON_PGFSM_CONFIG, 0x600010ff); + + for (i = 0; i < 5; i++) + WREG32(mmGMCON_PGFSM_WRITE, 0); + + WREG32(mmGMCON_PGFSM_WRITE, 0x373f3e); + WREG32(mmGMCON_PGFSM_CONFIG, 0x700010ff); + + for (i = 0; i < 5; i++) + WREG32(mmGMCON_PGFSM_WRITE, 0); + + WREG32(mmGMCON_PGFSM_WRITE, 0x3e1332); + WREG32(mmGMCON_PGFSM_CONFIG, 0xe00010ff); + + WREG32(mmGMCON_MISC3, save->gmcon_misc3); + WREG32(mmGMCON_MISC, save->gmcon_misc); + WREG32(mmGMCON_RENG_EXECUTE, save->gmcon_reng_execute); +} + +static void cik_gpu_pci_config_reset(struct amdgpu_device *adev) +{ + struct amdgpu_mode_mc_save save; + struct kv_reset_save_regs kv_save = { 0 }; + u32 tmp, i; + + dev_info(adev->dev, "GPU pci config reset\n"); + + /* disable dpm? */ + + /* disable cg/pg */ + + /* Disable GFX parsing/prefetching */ + WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK | + CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK); + + /* Disable MEC parsing/prefetching */ + WREG32(mmCP_MEC_CNTL, + CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK); + + /* sdma0 */ + tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET); + tmp |= SDMA0_F32_CNTL__HALT_MASK; + WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp); + /* sdma1 */ + tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET); + tmp |= SDMA0_F32_CNTL__HALT_MASK; + WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp); + /* XXX other engines? 
*/ + + /* halt the rlc, disable cp internal ints */ + gfx_v7_0_rlc_stop(adev); + + udelay(50); + + /* disable mem access */ + gmc_v7_0_mc_stop(adev, &save); + if (amdgpu_asic_wait_for_mc_idle(adev)) { + dev_warn(adev->dev, "Wait for MC idle timed out !\n"); + } + + if (adev->flags & AMDGPU_IS_APU) + kv_save_regs_for_reset(adev, &kv_save); + + /* disable BM */ + pci_clear_master(adev->pdev); + /* reset */ + amdgpu_pci_config_reset(adev); + + udelay(100); + + /* wait for asic to come out of reset */ + for (i = 0; i < adev->usec_timeout; i++) { + if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) + break; + udelay(1); + } + + /* does asic init need to be run first??? */ + if (adev->flags & AMDGPU_IS_APU) + kv_restore_regs_for_reset(adev, &kv_save); +} + +static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung) +{ + u32 tmp = RREG32(mmBIOS_SCRATCH_3); + + if (hung) + tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG; + else + tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG; + + WREG32(mmBIOS_SCRATCH_3, tmp); +} + +/** + * cik_asic_reset - soft reset GPU + * + * @adev: amdgpu_device pointer + * + * Look up which blocks are hung and attempt + * to reset them. + * Returns 0 for success. + */ +static int cik_asic_reset(struct amdgpu_device *adev) +{ + u32 reset_mask; + + reset_mask = amdgpu_cik_gpu_check_soft_reset(adev); + + if (reset_mask) + cik_set_bios_scratch_engine_hung(adev, true); + + /* try soft reset */ + cik_gpu_soft_reset(adev, reset_mask); + + reset_mask = amdgpu_cik_gpu_check_soft_reset(adev); + + /* try pci config reset */ + if (reset_mask && amdgpu_hard_reset) + cik_gpu_pci_config_reset(adev); + + reset_mask = amdgpu_cik_gpu_check_soft_reset(adev); + + if (!reset_mask) + cik_set_bios_scratch_engine_hung(adev, false); + + return 0; +} + +static int cik_set_uvd_clock(struct amdgpu_device *adev, u32 clock, + u32 cntl_reg, u32 status_reg) +{ + int r, i; + struct atom_clock_dividers dividers; + uint32_t tmp; + + r = amdgpu_atombios_get_clock_dividers(adev, + COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, + clock, false, &dividers); + if (r) + return r; + + tmp = RREG32_SMC(cntl_reg); + tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK | + CG_DCLK_CNTL__DCLK_DIVIDER_MASK); + tmp |= dividers.post_divider; + WREG32_SMC(cntl_reg, tmp); + + for (i = 0; i < 100; i++) { + if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK) + break; + mdelay(10); + } + if (i == 100) + return -ETIMEDOUT; + + return 0; +} + +static int cik_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk) +{ + int r = 0; + + r = cik_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS); + if (r) + return r; + + r = cik_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS); + return r; +} + +static int cik_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk) +{ + int r, i; + struct atom_clock_dividers dividers; + u32 tmp; + + r = amdgpu_atombios_get_clock_dividers(adev, + COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, + ecclk, false, &dividers); + if (r) + return r; + + for (i = 0; i < 100; i++) { + if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK) + break; + mdelay(10); + } + if (i == 100) + return -ETIMEDOUT; + + tmp = RREG32_SMC(ixCG_ECLK_CNTL); + tmp &= ~(CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK | + CG_ECLK_CNTL__ECLK_DIVIDER_MASK); + tmp |= dividers.post_divider; + WREG32_SMC(ixCG_ECLK_CNTL, tmp); + + for (i = 0; i < 100; i++) { + if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK) + break; + mdelay(10); + } + if (i == 100) + return -ETIMEDOUT; + + return 0; +} + +static void 
cik_pcie_gen3_enable(struct amdgpu_device *adev) +{ + struct pci_dev *root = adev->pdev->bus->self; + int bridge_pos, gpu_pos; + u32 speed_cntl, mask, current_data_rate; + int ret, i; + u16 tmp16; + + if (amdgpu_pcie_gen2 == 0) + return; + + if (adev->flags & AMDGPU_IS_APU) + return; + + ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); + if (ret != 0) + return; + + if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80))) + return; + + speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL); + current_data_rate = (speed_cntl & PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) >> + PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT; + if (mask & DRM_PCIE_SPEED_80) { + if (current_data_rate == 2) { + DRM_INFO("PCIE gen 3 link speeds already enabled\n"); + return; + } + DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n"); + } else if (mask & DRM_PCIE_SPEED_50) { + if (current_data_rate == 1) { + DRM_INFO("PCIE gen 2 link speeds already enabled\n"); + return; + } + DRM_INFO("enabling PCIE gen 2 link speeds, disable with amdgpu.pcie_gen2=0\n"); + } + + bridge_pos = pci_pcie_cap(root); + if (!bridge_pos) + return; + + gpu_pos = pci_pcie_cap(adev->pdev); + if (!gpu_pos) + return; + + if (mask & DRM_PCIE_SPEED_80) { + /* re-try equalization if gen3 is not already enabled */ + if (current_data_rate != 2) { + u16 bridge_cfg, gpu_cfg; + u16 bridge_cfg2, gpu_cfg2; + u32 max_lw, current_lw, tmp; + + pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg); + pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg); + + tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD; + pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16); + + tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD; + pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16); + + tmp = RREG32_PCIE(ixPCIE_LC_STATUS1); + max_lw = (tmp & PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH_MASK) >> + PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH__SHIFT; + current_lw = (tmp & PCIE_LC_STATUS1__LC_OPERATING_LINK_WIDTH_MASK) + >> PCIE_LC_STATUS1__LC_OPERATING_LINK_WIDTH__SHIFT; + + if (current_lw < max_lw) { + tmp = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL); + if (tmp & PCIE_LC_LINK_WIDTH_CNTL__LC_RENEGOTIATION_SUPPORT_MASK) { + tmp &= ~(PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_MASK | + PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_DIS_MASK); + tmp |= (max_lw << + PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH__SHIFT); + tmp |= PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_SUPPORT_MASK | + PCIE_LC_LINK_WIDTH_CNTL__LC_RENEGOTIATE_EN_MASK | + PCIE_LC_LINK_WIDTH_CNTL__LC_RECONFIG_NOW_MASK; + WREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL, tmp); + } + } + + for (i = 0; i < 10; i++) { + /* check status */ + pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16); + if (tmp16 & PCI_EXP_DEVSTA_TRPND) + break; + + pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg); + pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg); + + pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2); + pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2); + + tmp = RREG32_PCIE(ixPCIE_LC_CNTL4); + tmp |= PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK; + WREG32_PCIE(ixPCIE_LC_CNTL4, tmp); + + tmp = RREG32_PCIE(ixPCIE_LC_CNTL4); + tmp |= PCIE_LC_CNTL4__LC_REDO_EQ_MASK; + WREG32_PCIE(ixPCIE_LC_CNTL4, tmp); + + mdelay(100); + + /* linkctl */ + pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16); + tmp16 &= ~PCI_EXP_LNKCTL_HAWD; + tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD); + pci_write_config_word(root, bridge_pos + 
PCI_EXP_LNKCTL, tmp16); + + pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16); + tmp16 &= ~PCI_EXP_LNKCTL_HAWD; + tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD); + pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16); + + /* linkctl2 */ + pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16); + tmp16 &= ~((1 << 4) | (7 << 9)); + tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9))); + pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16); + + pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16); + tmp16 &= ~((1 << 4) | (7 << 9)); + tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9))); + pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16); + + tmp = RREG32_PCIE(ixPCIE_LC_CNTL4); + tmp &= ~PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK; + WREG32_PCIE(ixPCIE_LC_CNTL4, tmp); + } + } + } + + /* set the link speed */ + speed_cntl |= PCIE_LC_SPEED_CNTL__LC_FORCE_EN_SW_SPEED_CHANGE_MASK | + PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_HW_SPEED_CHANGE_MASK; + speed_cntl &= ~PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_SW_SPEED_CHANGE_MASK; + WREG32_PCIE(ixPCIE_LC_SPEED_CNTL, speed_cntl); + + pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16); + tmp16 &= ~0xf; + if (mask & DRM_PCIE_SPEED_80) + tmp16 |= 3; /* gen3 */ + else if (mask & DRM_PCIE_SPEED_50) + tmp16 |= 2; /* gen2 */ + else + tmp16 |= 1; /* gen1 */ + pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16); + + speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL); + speed_cntl |= PCIE_LC_SPEED_CNTL__LC_INITIATE_LINK_SPEED_CHANGE_MASK; + WREG32_PCIE(ixPCIE_LC_SPEED_CNTL, speed_cntl); + + for (i = 0; i < adev->usec_timeout; i++) { + speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL); + if ((speed_cntl & PCIE_LC_SPEED_CNTL__LC_INITIATE_LINK_SPEED_CHANGE_MASK) == 0) + break; + udelay(1); + } +} + +static void cik_program_aspm(struct amdgpu_device *adev) +{ + u32 data, orig; + bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false; + bool disable_clkreq = false; + + if (amdgpu_aspm == 0) + return; + + /* XXX double check APUs */ + if (adev->flags & AMDGPU_IS_APU) + return; + + orig = data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL); + data &= ~PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_MASK; + data |= (0x24 << PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS__SHIFT) | + PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_OVERRIDE_EN_MASK; + if (orig != data) + WREG32_PCIE(ixPCIE_LC_N_FTS_CNTL, data); + + orig = data = RREG32_PCIE(ixPCIE_LC_CNTL3); + data |= PCIE_LC_CNTL3__LC_GO_TO_RECOVERY_MASK; + if (orig != data) + WREG32_PCIE(ixPCIE_LC_CNTL3, data); + + orig = data = RREG32_PCIE(ixPCIE_P_CNTL); + data |= PCIE_P_CNTL__P_IGNORE_EDB_ERR_MASK; + if (orig != data) + WREG32_PCIE(ixPCIE_P_CNTL, data); + + orig = data = RREG32_PCIE(ixPCIE_LC_CNTL); + data &= ~(PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK | + PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK); + data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK; + if (!disable_l0s) + data |= (7 << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT); + + if (!disable_l1) { + data |= (7 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT); + data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK; + if (orig != data) + WREG32_PCIE(ixPCIE_LC_CNTL, data); + + if (!disable_plloff_in_l1) { + bool clk_req_support; + + orig = data = RREG32_PCIE(ixPB0_PIF_PWRDOWN_0); + data &= ~(PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0_MASK | + PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0_MASK); + data |= (7 << PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0__SHIFT) | + (7 << PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0__SHIFT); + if (orig != data) + 
WREG32_PCIE(ixPB0_PIF_PWRDOWN_0, data); + + orig = data = RREG32_PCIE(ixPB0_PIF_PWRDOWN_1); + data &= ~(PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1_MASK | + PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1_MASK); + data |= (7 << PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1__SHIFT) | + (7 << PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1__SHIFT); + if (orig != data) + WREG32_PCIE(ixPB0_PIF_PWRDOWN_1, data); + + orig = data = RREG32_PCIE(ixPB1_PIF_PWRDOWN_0); + data &= ~(PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0_MASK | + PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0_MASK); + data |= (7 << PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0__SHIFT) | + (7 << PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0__SHIFT); + if (orig != data) + WREG32_PCIE(ixPB1_PIF_PWRDOWN_0, data); + + orig = data = RREG32_PCIE(ixPB1_PIF_PWRDOWN_1); + data &= ~(PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1_MASK | + PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1_MASK); + data |= (7 << PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1__SHIFT) | + (7 << PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1__SHIFT); + if (orig != data) + WREG32_PCIE(ixPB1_PIF_PWRDOWN_1, data); + + orig = data = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL); + data &= ~PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE_MASK; + data |= ~(3 << PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE__SHIFT); + if (orig != data) + WREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL, data); + + if (!disable_clkreq) { + struct pci_dev *root = adev->pdev->bus->self; + u32 lnkcap; + + clk_req_support = false; + pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap); + if (lnkcap & PCI_EXP_LNKCAP_CLKPM) + clk_req_support = true; + } else { + clk_req_support = false; + } + + if (clk_req_support) { + orig = data = RREG32_PCIE(ixPCIE_LC_CNTL2); + data |= PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK | + PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK; + if (orig != data) + WREG32_PCIE(ixPCIE_LC_CNTL2, data); + + orig = data = RREG32_SMC(ixTHM_CLK_CNTL); + data &= ~(THM_CLK_CNTL__CMON_CLK_SEL_MASK | + THM_CLK_CNTL__TMON_CLK_SEL_MASK); + data |= (1 << THM_CLK_CNTL__CMON_CLK_SEL__SHIFT) | + (1 << THM_CLK_CNTL__TMON_CLK_SEL__SHIFT); + if (orig != data) + WREG32_SMC(ixTHM_CLK_CNTL, data); + + orig = data = RREG32_SMC(ixMISC_CLK_CTRL); + data &= ~(MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK | + MISC_CLK_CTRL__ZCLK_SEL_MASK); + data |= (1 << MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT) | + (1 << MISC_CLK_CTRL__ZCLK_SEL__SHIFT); + if (orig != data) + WREG32_SMC(ixMISC_CLK_CTRL, data); + + orig = data = RREG32_SMC(ixCG_CLKPIN_CNTL); + data &= ~CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK; + if (orig != data) + WREG32_SMC(ixCG_CLKPIN_CNTL, data); + + orig = data = RREG32_SMC(ixCG_CLKPIN_CNTL_2); + data &= ~CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK; + if (orig != data) + WREG32_SMC(ixCG_CLKPIN_CNTL_2, data); + + orig = data = RREG32_SMC(ixMPLL_BYPASSCLK_SEL); + data &= ~MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK; + data |= (4 << MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT); + if (orig != data) + WREG32_SMC(ixMPLL_BYPASSCLK_SEL, data); + } + } + } else { + if (orig != data) + WREG32_PCIE(ixPCIE_LC_CNTL, data); + } + + orig = data = RREG32_PCIE(ixPCIE_CNTL2); + data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK | + PCIE_CNTL2__MST_MEM_LS_EN_MASK | + PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK; + if (orig != data) + WREG32_PCIE(ixPCIE_CNTL2, data); + + if (!disable_l0s) { + data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL); + if ((data & PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) == + PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) { + data = RREG32_PCIE(ixPCIE_LC_STATUS1); + if ((data & 
PCIE_LC_STATUS1__LC_REVERSE_XMIT_MASK) && + (data & PCIE_LC_STATUS1__LC_REVERSE_RCVR_MASK)) { + orig = data = RREG32_PCIE(ixPCIE_LC_CNTL); + data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK; + if (orig != data) + WREG32_PCIE(ixPCIE_LC_CNTL, data); + } + } + } +} + +static uint32_t cik_get_rev_id(struct amdgpu_device *adev) +{ + return (RREG32(mmCC_DRM_ID_STRAPS) & CC_DRM_ID_STRAPS__ATI_REV_ID_MASK) + >> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT; +} + +static const struct amdgpu_ip_block_version bonaire_ip_blocks[] = +{ + /* ORDER MATTERS! */ + { + .type = AMDGPU_IP_BLOCK_TYPE_COMMON, + .major = 1, + .minor = 0, + .rev = 0, + .funcs = &cik_common_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_GMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &gmc_v7_0_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_IH, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_ih_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_SMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &ci_dpm_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_DCE, + .major = 8, + .minor = 2, + .rev = 0, + .funcs = &dce_v8_0_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_GFX, + .major = 7, + .minor = 2, + .rev = 0, + .funcs = &gfx_v7_0_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_SDMA, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_sdma_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_UVD, + .major = 4, + .minor = 2, + .rev = 0, + .funcs = &uvd_v4_2_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_VCE, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vce_v2_0_ip_funcs, + }, +}; + +static const struct amdgpu_ip_block_version hawaii_ip_blocks[] = +{ + /* ORDER MATTERS! */ + { + .type = AMDGPU_IP_BLOCK_TYPE_COMMON, + .major = 1, + .minor = 0, + .rev = 0, + .funcs = &cik_common_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_GMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &gmc_v7_0_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_IH, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_ih_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_SMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &ci_dpm_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_DCE, + .major = 8, + .minor = 5, + .rev = 0, + .funcs = &dce_v8_0_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_GFX, + .major = 7, + .minor = 3, + .rev = 0, + .funcs = &gfx_v7_0_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_SDMA, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_sdma_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_UVD, + .major = 4, + .minor = 2, + .rev = 0, + .funcs = &uvd_v4_2_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_VCE, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vce_v2_0_ip_funcs, + }, +}; + +static const struct amdgpu_ip_block_version kabini_ip_blocks[] = +{ + /* ORDER MATTERS! 
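(amdgpu initializes these IP blocks in array order and tears them down in reverse)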
*/ + { + .type = AMDGPU_IP_BLOCK_TYPE_COMMON, + .major = 1, + .minor = 0, + .rev = 0, + .funcs = &cik_common_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_GMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &gmc_v7_0_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_IH, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_ih_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_SMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &kv_dpm_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_DCE, + .major = 8, + .minor = 3, + .rev = 0, + .funcs = &dce_v8_0_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_GFX, + .major = 7, + .minor = 2, + .rev = 0, + .funcs = &gfx_v7_0_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_SDMA, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_sdma_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_UVD, + .major = 4, + .minor = 2, + .rev = 0, + .funcs = &uvd_v4_2_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_VCE, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vce_v2_0_ip_funcs, + }, +}; + +static const struct amdgpu_ip_block_version mullins_ip_blocks[] = +{ + /* ORDER MATTERS! */ + { + .type = AMDGPU_IP_BLOCK_TYPE_COMMON, + .major = 1, + .minor = 0, + .rev = 0, + .funcs = &cik_common_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_GMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &gmc_v7_0_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_IH, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_ih_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_SMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &kv_dpm_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_DCE, + .major = 8, + .minor = 3, + .rev = 0, + .funcs = &dce_v8_0_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_GFX, + .major = 7, + .minor = 2, + .rev = 0, + .funcs = &gfx_v7_0_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_SDMA, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_sdma_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_UVD, + .major = 4, + .minor = 2, + .rev = 0, + .funcs = &uvd_v4_2_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_VCE, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vce_v2_0_ip_funcs, + }, +}; + +static const struct amdgpu_ip_block_version kaveri_ip_blocks[] = +{ + /* ORDER MATTERS! 
*/ + { + .type = AMDGPU_IP_BLOCK_TYPE_COMMON, + .major = 1, + .minor = 0, + .rev = 0, + .funcs = &cik_common_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_GMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &gmc_v7_0_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_IH, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_ih_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_SMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &kv_dpm_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_DCE, + .major = 8, + .minor = 1, + .rev = 0, + .funcs = &dce_v8_0_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_GFX, + .major = 7, + .minor = 1, + .rev = 0, + .funcs = &gfx_v7_0_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_SDMA, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_sdma_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_UVD, + .major = 4, + .minor = 2, + .rev = 0, + .funcs = &uvd_v4_2_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_VCE, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vce_v2_0_ip_funcs, + }, +}; + +int cik_set_ip_blocks(struct amdgpu_device *adev) +{ + switch (adev->asic_type) { + case CHIP_BONAIRE: + adev->ip_blocks = bonaire_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks); + break; + case CHIP_HAWAII: + adev->ip_blocks = hawaii_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks); + break; + case CHIP_KAVERI: + adev->ip_blocks = kaveri_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks); + break; + case CHIP_KABINI: + adev->ip_blocks = kabini_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks); + break; + case CHIP_MULLINS: + adev->ip_blocks = mullins_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks); + break; + default: + /* FIXME: not supported yet */ + return -EINVAL; + } + + adev->ip_block_enabled = kcalloc(adev->num_ip_blocks, sizeof(bool), GFP_KERNEL); + if (adev->ip_block_enabled == NULL) + return -ENOMEM; + + return 0; +} + +static const struct amdgpu_asic_funcs cik_asic_funcs = +{ + .read_disabled_bios = &cik_read_disabled_bios, + .read_register = &cik_read_register, + .reset = &cik_asic_reset, + .set_vga_state = &cik_vga_set_state, + .get_xclk = &cik_get_xclk, + .set_uvd_clocks = &cik_set_uvd_clocks, + .set_vce_clocks = &cik_set_vce_clocks, + .get_cu_info = &gfx_v7_0_get_cu_info, + /* these should be moved to their own ip modules */ + .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter, + .wait_for_mc_idle = &gmc_v7_0_mc_wait_for_idle, +}; + +static int cik_common_early_init(struct amdgpu_device *adev) +{ + adev->smc_rreg = &cik_smc_rreg; + adev->smc_wreg = &cik_smc_wreg; + adev->pcie_rreg = &cik_pcie_rreg; + adev->pcie_wreg = &cik_pcie_wreg; + adev->uvd_ctx_rreg = &cik_uvd_ctx_rreg; + adev->uvd_ctx_wreg = &cik_uvd_ctx_wreg; + adev->didt_rreg = &cik_didt_rreg; + adev->didt_wreg = &cik_didt_wreg; + + adev->asic_funcs = &cik_asic_funcs; + + adev->has_uvd = true; + + adev->rev_id = cik_get_rev_id(adev); + adev->external_rev_id = 0xFF; + switch (adev->asic_type) { + case CHIP_BONAIRE: + adev->cg_flags = + AMDGPU_CG_SUPPORT_GFX_MGCG | + AMDGPU_CG_SUPPORT_GFX_MGLS | + /*AMDGPU_CG_SUPPORT_GFX_CGCG |*/ + AMDGPU_CG_SUPPORT_GFX_CGLS | + AMDGPU_CG_SUPPORT_GFX_CGTS | + AMDGPU_CG_SUPPORT_GFX_CGTS_LS | + AMDGPU_CG_SUPPORT_GFX_CP_LS | + AMDGPU_CG_SUPPORT_MC_LS | + AMDGPU_CG_SUPPORT_MC_MGCG | + AMDGPU_CG_SUPPORT_SDMA_MGCG | + AMDGPU_CG_SUPPORT_SDMA_LS | + AMDGPU_CG_SUPPORT_BIF_LS | + AMDGPU_CG_SUPPORT_VCE_MGCG | + AMDGPU_CG_SUPPORT_UVD_MGCG | + AMDGPU_CG_SUPPORT_HDP_LS | + 
AMDGPU_CG_SUPPORT_HDP_MGCG; + adev->pg_flags = 0; + adev->external_rev_id = adev->rev_id + 0x14; + break; + case CHIP_HAWAII: + adev->cg_flags = + AMDGPU_CG_SUPPORT_GFX_MGCG | + AMDGPU_CG_SUPPORT_GFX_MGLS | + /*AMDGPU_CG_SUPPORT_GFX_CGCG |*/ + AMDGPU_CG_SUPPORT_GFX_CGLS | + AMDGPU_CG_SUPPORT_GFX_CGTS | + AMDGPU_CG_SUPPORT_GFX_CP_LS | + AMDGPU_CG_SUPPORT_MC_LS | + AMDGPU_CG_SUPPORT_MC_MGCG | + AMDGPU_CG_SUPPORT_SDMA_MGCG | + AMDGPU_CG_SUPPORT_SDMA_LS | + AMDGPU_CG_SUPPORT_BIF_LS | + AMDGPU_CG_SUPPORT_VCE_MGCG | + AMDGPU_CG_SUPPORT_UVD_MGCG | + AMDGPU_CG_SUPPORT_HDP_LS | + AMDGPU_CG_SUPPORT_HDP_MGCG; + adev->pg_flags = 0; + adev->external_rev_id = 0x28; + break; + case CHIP_KAVERI: + adev->cg_flags = + AMDGPU_CG_SUPPORT_GFX_MGCG | + AMDGPU_CG_SUPPORT_GFX_MGLS | + /*AMDGPU_CG_SUPPORT_GFX_CGCG |*/ + AMDGPU_CG_SUPPORT_GFX_CGLS | + AMDGPU_CG_SUPPORT_GFX_CGTS | + AMDGPU_CG_SUPPORT_GFX_CGTS_LS | + AMDGPU_CG_SUPPORT_GFX_CP_LS | + AMDGPU_CG_SUPPORT_SDMA_MGCG | + AMDGPU_CG_SUPPORT_SDMA_LS | + AMDGPU_CG_SUPPORT_BIF_LS | + AMDGPU_CG_SUPPORT_VCE_MGCG | + AMDGPU_CG_SUPPORT_UVD_MGCG | + AMDGPU_CG_SUPPORT_HDP_LS | + AMDGPU_CG_SUPPORT_HDP_MGCG; + adev->pg_flags = + /*AMDGPU_PG_SUPPORT_GFX_PG | + AMDGPU_PG_SUPPORT_GFX_SMG | + AMDGPU_PG_SUPPORT_GFX_DMG |*/ + AMDGPU_PG_SUPPORT_UVD | + /*AMDGPU_PG_SUPPORT_VCE | + AMDGPU_PG_SUPPORT_CP | + AMDGPU_PG_SUPPORT_GDS | + AMDGPU_PG_SUPPORT_RLC_SMU_HS | + AMDGPU_PG_SUPPORT_ACP | + AMDGPU_PG_SUPPORT_SAMU |*/ + 0; + if (adev->pdev->device == 0x1312 || + adev->pdev->device == 0x1316 || + adev->pdev->device == 0x1317) + adev->external_rev_id = 0x41; + else + adev->external_rev_id = 0x1; + break; + case CHIP_KABINI: + case CHIP_MULLINS: + adev->cg_flags = + AMDGPU_CG_SUPPORT_GFX_MGCG | + AMDGPU_CG_SUPPORT_GFX_MGLS | + /*AMDGPU_CG_SUPPORT_GFX_CGCG |*/ + AMDGPU_CG_SUPPORT_GFX_CGLS | + AMDGPU_CG_SUPPORT_GFX_CGTS | + AMDGPU_CG_SUPPORT_GFX_CGTS_LS | + AMDGPU_CG_SUPPORT_GFX_CP_LS | + AMDGPU_CG_SUPPORT_SDMA_MGCG | + AMDGPU_CG_SUPPORT_SDMA_LS | + AMDGPU_CG_SUPPORT_BIF_LS | + AMDGPU_CG_SUPPORT_VCE_MGCG | + AMDGPU_CG_SUPPORT_UVD_MGCG | + AMDGPU_CG_SUPPORT_HDP_LS | + AMDGPU_CG_SUPPORT_HDP_MGCG; + adev->pg_flags = + /*AMDGPU_PG_SUPPORT_GFX_PG | + AMDGPU_PG_SUPPORT_GFX_SMG | */ + AMDGPU_PG_SUPPORT_UVD | + /*AMDGPU_PG_SUPPORT_VCE | + AMDGPU_PG_SUPPORT_CP | + AMDGPU_PG_SUPPORT_GDS | + AMDGPU_PG_SUPPORT_RLC_SMU_HS | + AMDGPU_PG_SUPPORT_SAMU |*/ + 0; + if (adev->asic_type == CHIP_KABINI) { + if (adev->rev_id == 0) + adev->external_rev_id = 0x81; + else if (adev->rev_id == 1) + adev->external_rev_id = 0x82; + else if (adev->rev_id == 2) + adev->external_rev_id = 0x85; + } else + adev->external_rev_id = adev->rev_id + 0xa1; + break; + default: + /* FIXME: not supported yet */ + return -EINVAL; + } + + return 0; +} + +static int cik_common_sw_init(struct amdgpu_device *adev) +{ + return 0; +} + +static int cik_common_sw_fini(struct amdgpu_device *adev) +{ + return 0; +} + +static int cik_common_hw_init(struct amdgpu_device *adev) +{ + /* move the golden regs per IP block */ + cik_init_golden_registers(adev); + /* enable pcie gen2/3 link */ + cik_pcie_gen3_enable(adev); + /* enable aspm */ + cik_program_aspm(adev); + + return 0; +} + +static int cik_common_hw_fini(struct amdgpu_device *adev) +{ + return 0; +} + +static int cik_common_suspend(struct amdgpu_device *adev) +{ + return cik_common_hw_fini(adev); +} + +static int cik_common_resume(struct amdgpu_device *adev) +{ + return cik_common_hw_init(adev); +} + +static bool cik_common_is_idle(struct amdgpu_device *adev) +{ + return true; +} + 
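+/* The "common" IP block carries no hardware state of its own, so the
+ * idle/wait/reset/print_status hooks around here are effectively stubs. */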
+static int cik_common_wait_for_idle(struct amdgpu_device *adev) +{ + return 0; +} + +static void cik_common_print_status(struct amdgpu_device *adev) +{ + +} + +static int cik_common_soft_reset(struct amdgpu_device *adev) +{ + /* XXX hard reset?? */ + return 0; +} + +static int cik_common_set_clockgating_state(struct amdgpu_device *adev, + enum amdgpu_clockgating_state state) +{ + return 0; +} + +static int cik_common_set_powergating_state(struct amdgpu_device *adev, + enum amdgpu_powergating_state state) +{ + return 0; +} + +const struct amdgpu_ip_funcs cik_common_ip_funcs = { + .early_init = cik_common_early_init, + .late_init = NULL, + .sw_init = cik_common_sw_init, + .sw_fini = cik_common_sw_fini, + .hw_init = cik_common_hw_init, + .hw_fini = cik_common_hw_fini, + .suspend = cik_common_suspend, + .resume = cik_common_resume, + .is_idle = cik_common_is_idle, + .wait_for_idle = cik_common_wait_for_idle, + .soft_reset = cik_common_soft_reset, + .print_status = cik_common_print_status, + .set_clockgating_state = cik_common_set_clockgating_state, + .set_powergating_state = cik_common_set_powergating_state, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/cik.h b/drivers/gpu/drm/amd/amdgpu/cik.h new file mode 100644 index 000000000000..967d630a4dcb --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/cik.h @@ -0,0 +1,33 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __CIK_H__ +#define __CIK_H__ + +extern const struct amdgpu_ip_funcs cik_common_ip_funcs; + +void cik_srbm_select(struct amdgpu_device *adev, + u32 me, u32 pipe, u32 queue, u32 vmid); +int cik_set_ip_blocks(struct amdgpu_device *adev); + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/cik_dpm.h b/drivers/gpu/drm/amd/amdgpu/cik_dpm.h new file mode 100644 index 000000000000..35d8efdcde7e --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/cik_dpm.h @@ -0,0 +1,30 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __CIK_DPM_H__ +#define __CIK_DPM_H__ + +extern const struct amdgpu_ip_funcs ci_dpm_ip_funcs; +extern const struct amdgpu_ip_funcs kv_dpm_ip_funcs; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c new file mode 100644 index 000000000000..81e8bbaba3e8 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c @@ -0,0 +1,453 @@ +/* + * Copyright 2012 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "drmP.h" +#include "amdgpu.h" +#include "amdgpu_ih.h" +#include "cikd.h" + +#include "bif/bif_4_1_d.h" +#include "bif/bif_4_1_sh_mask.h" + +#include "oss/oss_2_0_d.h" +#include "oss/oss_2_0_sh_mask.h" + +/* + * Interrupts + * Starting with r6xx, interrupts are handled via a ring buffer. + * Ring buffers are areas of GPU accessible memory that the GPU + * writes interrupt vectors into and the host reads vectors out of. + * There is a rptr (read pointer) that determines where the + * host is currently reading, and a wptr (write pointer) + * which determines where the GPU has written. When the + * pointers are equal, the ring is idle. When the GPU + * writes vectors to the ring buffer, it increments the + * wptr. When there is an interrupt, the host then starts + * fetching commands and processing them until the pointers are + * equal again at which point it updates the rptr. 
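In code, the protocol described above reduces to a small consumer loop; a hedged sketch using the helpers defined below (the real loop lives in the generic amdgpu IH processing code, not in this file):

        u32 wptr = cik_ih_get_wptr(adev);        /* how far the GPU has written */
        while (adev->irq.ih.rptr != wptr) {
                struct amdgpu_iv_entry entry;

                cik_ih_decode_iv(adev, &entry);  /* advances ih.rptr by one 16-byte vector */
                /* dispatch on entry.src_id / entry.src_data here */
        }
        cik_ih_set_rptr(adev);                   /* tell the hardware how far we have read */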
+ */ + +static void cik_ih_set_interrupt_funcs(struct amdgpu_device *adev); + +/** + * cik_ih_enable_interrupts - Enable the interrupt ring buffer + * + * @adev: amdgpu_device pointer + * + * Enable the interrupt ring buffer (CIK). + */ +static void cik_ih_enable_interrupts(struct amdgpu_device *adev) +{ + u32 ih_cntl = RREG32(mmIH_CNTL); + u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL); + + ih_cntl |= IH_CNTL__ENABLE_INTR_MASK; + ih_rb_cntl |= IH_RB_CNTL__RB_ENABLE_MASK; + WREG32(mmIH_CNTL, ih_cntl); + WREG32(mmIH_RB_CNTL, ih_rb_cntl); + adev->irq.ih.enabled = true; +} + +/** + * cik_ih_disable_interrupts - Disable the interrupt ring buffer + * + * @adev: amdgpu_device pointer + * + * Disable the interrupt ring buffer (CIK). + */ +static void cik_ih_disable_interrupts(struct amdgpu_device *adev) +{ + u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL); + u32 ih_cntl = RREG32(mmIH_CNTL); + + ih_rb_cntl &= ~IH_RB_CNTL__RB_ENABLE_MASK; + ih_cntl &= ~IH_CNTL__ENABLE_INTR_MASK; + WREG32(mmIH_RB_CNTL, ih_rb_cntl); + WREG32(mmIH_CNTL, ih_cntl); + /* set rptr, wptr to 0 */ + WREG32(mmIH_RB_RPTR, 0); + WREG32(mmIH_RB_WPTR, 0); + adev->irq.ih.enabled = false; + adev->irq.ih.rptr = 0; +} + +/** + * cik_ih_irq_init - init and enable the interrupt ring + * + * @adev: amdgpu_device pointer + * + * Allocate a ring buffer for the interrupt controller, + * enable the RLC, disable interrupts, enable the IH + * ring buffer and enable it (CIK). + * Called at device load and reume. + * Returns 0 for success, errors for failure. + */ +static int cik_ih_irq_init(struct amdgpu_device *adev) +{ + int ret = 0; + int rb_bufsz; + u32 interrupt_cntl, ih_cntl, ih_rb_cntl; + u64 wptr_off; + + /* disable irqs */ + cik_ih_disable_interrupts(adev); + + /* setup interrupt control */ + WREG32(mmINTERRUPT_CNTL2, adev->dummy_page.addr >> 8); + interrupt_cntl = RREG32(mmINTERRUPT_CNTL); + /* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi + * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN + */ + interrupt_cntl &= ~INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK; + /* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */ + interrupt_cntl &= ~INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK; + WREG32(mmINTERRUPT_CNTL, interrupt_cntl); + + WREG32(mmIH_RB_BASE, adev->irq.ih.gpu_addr >> 8); + rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4); + + ih_rb_cntl = (IH_RB_CNTL__WPTR_OVERFLOW_ENABLE_MASK | + IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK | + (rb_bufsz << 1)); + + ih_rb_cntl |= IH_RB_CNTL__WPTR_WRITEBACK_ENABLE_MASK; + + /* set the writeback address whether it's enabled or not */ + wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4); + WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off)); + WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF); + + WREG32(mmIH_RB_CNTL, ih_rb_cntl); + + /* set rptr, wptr to 0 */ + WREG32(mmIH_RB_RPTR, 0); + WREG32(mmIH_RB_WPTR, 0); + + /* Default settings for IH_CNTL (disabled at first) */ + ih_cntl = (0x10 << IH_CNTL__MC_WRREQ_CREDIT__SHIFT) | + (0x10 << IH_CNTL__MC_WR_CLEAN_CNT__SHIFT) | + (0 << IH_CNTL__MC_VMID__SHIFT); + /* IH_CNTL__RPTR_REARM_MASK only works if msi's are enabled */ + if (adev->irq.msi_enabled) + ih_cntl |= IH_CNTL__RPTR_REARM_MASK; + WREG32(mmIH_CNTL, ih_cntl); + + pci_set_master(adev->pdev); + + /* enable irqs */ + cik_ih_enable_interrupts(adev); + + return ret; +} + +/** + * cik_ih_irq_disable - disable interrupts + * + * @adev: amdgpu_device pointer + * + * Disable 
interrupts on the hw (CIK). + */ +static void cik_ih_irq_disable(struct amdgpu_device *adev) +{ + cik_ih_disable_interrupts(adev); + /* Wait and acknowledge irq */ + mdelay(1); +} + +/** + * cik_ih_get_wptr - get the IH ring buffer wptr + * + * @adev: amdgpu_device pointer + * + * Get the IH ring buffer wptr from either the register + * or the writeback memory buffer (CIK). Also check for + * ring buffer overflow and deal with it. + * Used by cik_irq_process(). + * Returns the value of the wptr. + */ +static u32 cik_ih_get_wptr(struct amdgpu_device *adev) +{ + u32 wptr, tmp; + + wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]); + + if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) { + wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK; + /* When a ring buffer overflow happen start parsing interrupt + * from the last not overwritten vector (wptr + 16). Hopefully + * this should allow us to catchup. + */ + dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", + wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask); + adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask; + tmp = RREG32(mmIH_RB_CNTL); + tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK; + WREG32(mmIH_RB_CNTL, tmp); + } + return (wptr & adev->irq.ih.ptr_mask); +} + +/* CIK IV Ring + * Each IV ring entry is 128 bits: + * [7:0] - interrupt source id + * [31:8] - reserved + * [59:32] - interrupt source data + * [63:60] - reserved + * [71:64] - RINGID + * CP: + * ME_ID [1:0], PIPE_ID[1:0], QUEUE_ID[2:0] + * QUEUE_ID - for compute, which of the 8 queues owned by the dispatcher + * - for gfx, hw shader state (0=PS...5=LS, 6=CS) + * ME_ID - 0 = gfx, 1 = first 4 CS pipes, 2 = second 4 CS pipes + * PIPE_ID - ME0 0=3D + * - ME1&2 compute dispatcher (4 pipes each) + * SDMA: + * INSTANCE_ID [1:0], QUEUE_ID[1:0] + * INSTANCE_ID - 0 = sdma0, 1 = sdma1 + * QUEUE_ID - 0 = gfx, 1 = rlc0, 2 = rlc1 + * [79:72] - VMID + * [95:80] - PASID + * [127:96] - reserved + */ + + /** + * cik_ih_decode_iv - decode an interrupt vector + * + * @adev: amdgpu_device pointer + * + * Decodes the interrupt vector at the current rptr + * position and also advance the position. + */ +static void cik_ih_decode_iv(struct amdgpu_device *adev, + struct amdgpu_iv_entry *entry) +{ + /* wptr/rptr are in bytes! */ + u32 ring_index = adev->irq.ih.rptr >> 2; + uint32_t dw[4]; + + dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]); + dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]); + dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]); + dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]); + + entry->src_id = dw[0] & 0xff; + entry->src_data = dw[1] & 0xfffffff; + entry->ring_id = dw[2] & 0xff; + entry->vm_id = (dw[2] >> 8) & 0xff; + entry->pas_id = (dw[2] >> 16) & 0xffff; + + /* wptr/rptr are in bytes! */ + adev->irq.ih.rptr += 16; +} + +/** + * cik_ih_set_rptr - set the IH ring buffer rptr + * + * @adev: amdgpu_device pointer + * + * Set the IH ring buffer rptr. 
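A worked example of the overflow path in cik_ih_get_wptr() above, assuming the 64 KB ring allocated in cik_ih_sw_init() below (so ptr_mask is presumably 0xFFFF): if the writeback value is 0x0120 with RB_OVERFLOW set, the driver warns, resynchronizes rptr to (0x0120 + 16) & 0xFFFF = 0x0130, i.e. one 16-byte vector past the newest entry, and clears the overflow condition in IH_RB_CNTL before continuing.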
+ */ +static void cik_ih_set_rptr(struct amdgpu_device *adev) +{ + WREG32(mmIH_RB_RPTR, adev->irq.ih.rptr); +} + +static int cik_ih_early_init(struct amdgpu_device *adev) +{ + cik_ih_set_interrupt_funcs(adev); + + return 0; +} + +static int cik_ih_sw_init(struct amdgpu_device *adev) +{ + int r; + + r = amdgpu_ih_ring_init(adev, 64 * 1024, false); + if (r) + return r; + + r = amdgpu_irq_init(adev); + + return r; +} + +static int cik_ih_sw_fini(struct amdgpu_device *adev) +{ + amdgpu_irq_fini(adev); + amdgpu_ih_ring_fini(adev); + + return 0; +} + +static int cik_ih_hw_init(struct amdgpu_device *adev) +{ + int r; + + r = cik_ih_irq_init(adev); + if (r) + return r; + + return 0; +} + +static int cik_ih_hw_fini(struct amdgpu_device *adev) +{ + cik_ih_irq_disable(adev); + + return 0; +} + +static int cik_ih_suspend(struct amdgpu_device *adev) +{ + return cik_ih_hw_fini(adev); +} + +static int cik_ih_resume(struct amdgpu_device *adev) +{ + return cik_ih_hw_init(adev); +} + +static bool cik_ih_is_idle(struct amdgpu_device *adev) +{ + u32 tmp = RREG32(mmSRBM_STATUS); + + if (tmp & SRBM_STATUS__IH_BUSY_MASK) + return false; + + return true; +} + +static int cik_ih_wait_for_idle(struct amdgpu_device *adev) +{ + unsigned i; + u32 tmp; + + for (i = 0; i < adev->usec_timeout; i++) { + /* read MC_STATUS */ + tmp = RREG32(mmSRBM_STATUS) & SRBM_STATUS__IH_BUSY_MASK; + if (!tmp) + return 0; + udelay(1); + } + return -ETIMEDOUT; +} + +static void cik_ih_print_status(struct amdgpu_device *adev) +{ + dev_info(adev->dev, "CIK IH registers\n"); + dev_info(adev->dev, " SRBM_STATUS=0x%08X\n", + RREG32(mmSRBM_STATUS)); + dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", + RREG32(mmSRBM_STATUS2)); + dev_info(adev->dev, " INTERRUPT_CNTL=0x%08X\n", + RREG32(mmINTERRUPT_CNTL)); + dev_info(adev->dev, " INTERRUPT_CNTL2=0x%08X\n", + RREG32(mmINTERRUPT_CNTL2)); + dev_info(adev->dev, " IH_CNTL=0x%08X\n", + RREG32(mmIH_CNTL)); + dev_info(adev->dev, " IH_RB_CNTL=0x%08X\n", + RREG32(mmIH_RB_CNTL)); + dev_info(adev->dev, " IH_RB_BASE=0x%08X\n", + RREG32(mmIH_RB_BASE)); + dev_info(adev->dev, " IH_RB_WPTR_ADDR_LO=0x%08X\n", + RREG32(mmIH_RB_WPTR_ADDR_LO)); + dev_info(adev->dev, " IH_RB_WPTR_ADDR_HI=0x%08X\n", + RREG32(mmIH_RB_WPTR_ADDR_HI)); + dev_info(adev->dev, " IH_RB_RPTR=0x%08X\n", + RREG32(mmIH_RB_RPTR)); + dev_info(adev->dev, " IH_RB_WPTR=0x%08X\n", + RREG32(mmIH_RB_WPTR)); +} + +static int cik_ih_soft_reset(struct amdgpu_device *adev) +{ + u32 srbm_soft_reset = 0; + u32 tmp = RREG32(mmSRBM_STATUS); + + if (tmp & SRBM_STATUS__IH_BUSY_MASK) + srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK; + + if (srbm_soft_reset) { + cik_ih_print_status(adev); + + tmp = RREG32(mmSRBM_SOFT_RESET); + tmp |= srbm_soft_reset; + dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + udelay(50); + + tmp &= ~srbm_soft_reset; + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + /* Wait a little for things to settle down */ + udelay(50); + + cik_ih_print_status(adev); + } + + return 0; +} + +static int cik_ih_set_clockgating_state(struct amdgpu_device *adev, + enum amdgpu_clockgating_state state) +{ + return 0; +} + +static int cik_ih_set_powergating_state(struct amdgpu_device *adev, + enum amdgpu_powergating_state state) +{ + return 0; +} + +const struct amdgpu_ip_funcs cik_ih_ip_funcs = { + .early_init = cik_ih_early_init, + .late_init = NULL, + .sw_init = cik_ih_sw_init, + .sw_fini = cik_ih_sw_fini, + .hw_init = cik_ih_hw_init, + .hw_fini = 
cik_ih_hw_fini, + .suspend = cik_ih_suspend, + .resume = cik_ih_resume, + .is_idle = cik_ih_is_idle, + .wait_for_idle = cik_ih_wait_for_idle, + .soft_reset = cik_ih_soft_reset, + .print_status = cik_ih_print_status, + .set_clockgating_state = cik_ih_set_clockgating_state, + .set_powergating_state = cik_ih_set_powergating_state, +}; + +static const struct amdgpu_ih_funcs cik_ih_funcs = { + .get_wptr = cik_ih_get_wptr, + .decode_iv = cik_ih_decode_iv, + .set_rptr = cik_ih_set_rptr +}; + +static void cik_ih_set_interrupt_funcs(struct amdgpu_device *adev) +{ + if (adev->irq.ih_funcs == NULL) + adev->irq.ih_funcs = &cik_ih_funcs; +} diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.h b/drivers/gpu/drm/amd/amdgpu/cik_ih.h new file mode 100644 index 000000000000..f70162525034 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.h @@ -0,0 +1,29 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __CIK_IH_H__ +#define __CIK_IH_H__ + +extern const struct amdgpu_ip_funcs cik_ih_ip_funcs; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c new file mode 100644 index 000000000000..ae2bb26fa46e --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -0,0 +1,1422 @@ +/* + * Copyright 2013 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Alex Deucher + */ +#include +#include +#include "amdgpu.h" +#include "amdgpu_ucode.h" +#include "amdgpu_trace.h" +#include "cikd.h" +#include "cik.h" + +#include "bif/bif_4_1_d.h" +#include "bif/bif_4_1_sh_mask.h" + +#include "gca/gfx_7_2_d.h" + +#include "gmc/gmc_7_1_d.h" +#include "gmc/gmc_7_1_sh_mask.h" + +#include "oss/oss_2_0_d.h" +#include "oss/oss_2_0_sh_mask.h" + +static const u32 sdma_offsets[SDMA_MAX_INSTANCE] = +{ + SDMA0_REGISTER_OFFSET, + SDMA1_REGISTER_OFFSET +}; + +static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev); +static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev); +static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev); +static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev); + +MODULE_FIRMWARE("radeon/bonaire_sdma.bin"); +MODULE_FIRMWARE("radeon/bonaire_sdma1.bin"); +MODULE_FIRMWARE("radeon/hawaii_sdma.bin"); +MODULE_FIRMWARE("radeon/hawaii_sdma1.bin"); +MODULE_FIRMWARE("radeon/kaveri_sdma.bin"); +MODULE_FIRMWARE("radeon/kaveri_sdma1.bin"); +MODULE_FIRMWARE("radeon/kabini_sdma.bin"); +MODULE_FIRMWARE("radeon/kabini_sdma1.bin"); +MODULE_FIRMWARE("radeon/mullins_sdma.bin"); +MODULE_FIRMWARE("radeon/mullins_sdma1.bin"); + +u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev); + +/* + * sDMA - System DMA + * Starting with CIK, the GPU has new asynchronous + * DMA engines. These engines are used for compute + * and gfx. There are two DMA engines (SDMA0, SDMA1) + * and each one supports 1 ring buffer used for gfx + * and 2 queues used for compute. + * + * The programming model is very similar to the CP + * (ring buffer, IBs, etc.), but sDMA has it's own + * packet format that is different from the PM4 format + * used by the CP. sDMA supports copying data, writing + * embedded data, solid fills, and a number of other + * things. It also has support for tiling/detiling of + * buffers. + */ + +/** + * cik_sdma_init_microcode - load ucode images from disk + * + * @adev: amdgpu_device pointer + * + * Use the firmware interface to load the ucode images into + * the driver (not loaded into hw). + * Returns 0 on success, error on failure. + */ +static int cik_sdma_init_microcode(struct amdgpu_device *adev) +{ + const char *chip_name; + char fw_name[30]; + int err, i; + + DRM_DEBUG("\n"); + + switch (adev->asic_type) { + case CHIP_BONAIRE: + chip_name = "bonaire"; + break; + case CHIP_HAWAII: + chip_name = "hawaii"; + break; + case CHIP_KAVERI: + chip_name = "kaveri"; + break; + case CHIP_KABINI: + chip_name = "kabini"; + break; + case CHIP_MULLINS: + chip_name = "mullins"; + break; + default: BUG(); + } + + for (i = 0; i < SDMA_MAX_INSTANCE; i++) { + if (i == 0) + snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name); + else + snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma1.bin", chip_name); + err = request_firmware(&adev->sdma[i].fw, fw_name, adev->dev); + if (err) + goto out; + err = amdgpu_ucode_validate(adev->sdma[i].fw); + } +out: + if (err) { + printk(KERN_ERR + "cik_sdma: Failed to load firmware \"%s\"\n", + fw_name); + for (i = 0; i < SDMA_MAX_INSTANCE; i++) { + release_firmware(adev->sdma[i].fw); + adev->sdma[i].fw = NULL; + } + } + return err; +} + +/** + * cik_sdma_ring_get_rptr - get the current read pointer + * + * @ring: amdgpu ring pointer + * + * Get the current rptr from the hardware (CIK+). 
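The pointer value the engine writes back is a byte offset into the ring; the accessors below mask it to the ring range and convert between bytes and dwords, roughly (rptr_bytes here stands for the raw writeback value, assuming the 256 KB gfx rings set up in cik_sdma_sw_init() below):

        u32 dw_index   = (rptr_bytes & 0x3fffc) >> 2;   /* bytes -> dword units used by amdgpu_ring */
        u32 wptr_bytes = (ring->wptr << 2) & 0x3fffc;   /* dwords -> bytes written to the RB_WPTR register */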
+ */ +static uint32_t cik_sdma_ring_get_rptr(struct amdgpu_ring *ring) +{ + u32 rptr; + + rptr = ring->adev->wb.wb[ring->rptr_offs]; + + return (rptr & 0x3fffc) >> 2; +} + +/** + * cik_sdma_ring_get_wptr - get the current write pointer + * + * @ring: amdgpu ring pointer + * + * Get the current wptr from the hardware (CIK+). + */ +static uint32_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + u32 me = (ring == &adev->sdma[0].ring) ? 0 : 1; + + return (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2; +} + +/** + * cik_sdma_ring_set_wptr - commit the write pointer + * + * @ring: amdgpu ring pointer + * + * Write the wptr back to the hardware (CIK+). + */ +static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + u32 me = (ring == &adev->sdma[0].ring) ? 0 : 1; + + WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc); +} + +static void cik_sdma_hdp_flush_ring_emit(struct amdgpu_ring *); + +/** + * cik_sdma_ring_emit_ib - Schedule an IB on the DMA engine + * + * @ring: amdgpu ring pointer + * @ib: IB object to schedule + * + * Schedule an IB in the DMA ring (CIK). + */ +static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring, + struct amdgpu_ib *ib) +{ + u32 extra_bits = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf; + u32 next_rptr = ring->wptr + 5; + + if (ib->flush_hdp_writefifo) + next_rptr += 6; + + while ((next_rptr & 7) != 4) + next_rptr++; + + next_rptr += 4; + amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0)); + amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); + amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); + amdgpu_ring_write(ring, 1); /* number of DWs to follow */ + amdgpu_ring_write(ring, next_rptr); + + if (ib->flush_hdp_writefifo) { + /* flush HDP */ + cik_sdma_hdp_flush_ring_emit(ring); + } + + /* IB packet must end on a 8 DW boundary */ + while ((ring->wptr & 7) != 4) + amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0)); + amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits)); + amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */ + amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff); + amdgpu_ring_write(ring, ib->length_dw); + +} + +/** + * cik_sdma_hdp_flush_ring_emit - emit an hdp flush on the DMA ring + * + * @ring: amdgpu ring pointer + * + * Emit an hdp flush packet on the requested DMA ring. 
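A hedged reading of the POLL_REG_MEM packet emitted below: the engine writes ref_and_mask to GPU_HDP_FLUSH_REQ and then re-reads GPU_HDP_FLUSH_DONE until the masked value matches the reference, bounded by the final dword ((0xfff << 16) | 10, i.e. up to 0xfff retries with a poll interval of 10). In CPU terms it behaves roughly like:

        WREG32(mmGPU_HDP_FLUSH_REQ, ref_and_mask);
        while ((RREG32(mmGPU_HDP_FLUSH_DONE) & ref_and_mask) != ref_and_mask)
                ;       /* the hardware, not the CPU, performs this bounded poll */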
+ */ +static void cik_sdma_hdp_flush_ring_emit(struct amdgpu_ring *ring) +{ + u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) | + SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */ + u32 ref_and_mask; + + if (ring == &ring->adev->sdma[0].ring) + ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA0_MASK; + else + ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA1_MASK; + + amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits)); + amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2); + amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2); + amdgpu_ring_write(ring, ref_and_mask); /* reference */ + amdgpu_ring_write(ring, ref_and_mask); /* mask */ + amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */ +} + +/** + * cik_sdma_ring_emit_fence - emit a fence on the DMA ring + * + * @ring: amdgpu ring pointer + * @fence: amdgpu fence object + * + * Add a DMA fence packet to the ring to write + * the fence seq number and DMA trap packet to generate + * an interrupt if needed (CIK). + */ +static void cik_sdma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, + bool write64bit) +{ + /* write the fence */ + amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0)); + amdgpu_ring_write(ring, lower_32_bits(addr)); + amdgpu_ring_write(ring, upper_32_bits(addr)); + amdgpu_ring_write(ring, lower_32_bits(seq)); + + /* optionally write high bits as well */ + if (write64bit) { + addr += 4; + amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0)); + amdgpu_ring_write(ring, lower_32_bits(addr)); + amdgpu_ring_write(ring, upper_32_bits(addr)); + amdgpu_ring_write(ring, upper_32_bits(seq)); + } + + /* generate an interrupt */ + amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0)); +} + +/** + * cik_sdma_ring_emit_semaphore - emit a semaphore on the dma ring + * + * @ring: amdgpu_ring structure holding ring information + * @semaphore: amdgpu semaphore object + * @emit_wait: wait or signal semaphore + * + * Add a DMA semaphore packet to the ring wait on or signal + * other rings (CIK). + */ +static bool cik_sdma_ring_emit_semaphore(struct amdgpu_ring *ring, + struct amdgpu_semaphore *semaphore, + bool emit_wait) +{ + u64 addr = semaphore->gpu_addr; + u32 extra_bits = emit_wait ? 0 : SDMA_SEMAPHORE_EXTRA_S; + + amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits)); + amdgpu_ring_write(ring, addr & 0xfffffff8); + amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff); + + return true; +} + +/** + * cik_sdma_gfx_stop - stop the gfx async dma engines + * + * @adev: amdgpu_device pointer + * + * Stop the gfx async dma ring buffers (CIK). + */ +static void cik_sdma_gfx_stop(struct amdgpu_device *adev) +{ + struct amdgpu_ring *sdma0 = &adev->sdma[0].ring; + struct amdgpu_ring *sdma1 = &adev->sdma[1].ring; + u32 rb_cntl; + int i; + + if ((adev->mman.buffer_funcs_ring == sdma0) || + (adev->mman.buffer_funcs_ring == sdma1)) + amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size); + + for (i = 0; i < SDMA_MAX_INSTANCE; i++) { + rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]); + rb_cntl &= ~SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK; + WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl); + WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], 0); + } + sdma0->ready = false; + sdma1->ready = false; +} + +/** + * cik_sdma_rlc_stop - stop the compute async dma engines + * + * @adev: amdgpu_device pointer + * + * Stop the compute async dma queues (CIK). 
+ */ +static void cik_sdma_rlc_stop(struct amdgpu_device *adev) +{ + /* XXX todo */ +} + +/** + * cik_sdma_enable - stop the async dma engines + * + * @adev: amdgpu_device pointer + * @enable: enable/disable the DMA MEs. + * + * Halt or unhalt the async dma engines (CIK). + */ +static void cik_sdma_enable(struct amdgpu_device *adev, bool enable) +{ + u32 me_cntl; + int i; + + if (enable == false) { + cik_sdma_gfx_stop(adev); + cik_sdma_rlc_stop(adev); + } + + for (i = 0; i < SDMA_MAX_INSTANCE; i++) { + me_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]); + if (enable) + me_cntl &= ~SDMA0_F32_CNTL__HALT_MASK; + else + me_cntl |= SDMA0_F32_CNTL__HALT_MASK; + WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], me_cntl); + } +} + +/** + * cik_sdma_gfx_resume - setup and start the async dma engines + * + * @adev: amdgpu_device pointer + * + * Set up the gfx DMA ring buffers and enable them (CIK). + * Returns 0 for success, error for failure. + */ +static int cik_sdma_gfx_resume(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring; + u32 rb_cntl, ib_cntl; + u32 rb_bufsz; + u32 wb_offset; + int i, j, r; + + for (i = 0; i < SDMA_MAX_INSTANCE; i++) { + ring = &adev->sdma[i].ring; + wb_offset = (ring->rptr_offs * 4); + + mutex_lock(&adev->srbm_mutex); + for (j = 0; j < 16; j++) { + cik_srbm_select(adev, 0, 0, 0, j); + /* SDMA GFX */ + WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0); + WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0); + /* XXX SDMA RLC - todo */ + } + cik_srbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + + WREG32(mmSDMA0_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0); + WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0); + + /* Set ring buffer size in dwords */ + rb_bufsz = order_base_2(ring->ring_size / 4); + rb_cntl = rb_bufsz << 1; +#ifdef __BIG_ENDIAN + rb_cntl |= SDMA_RB_SWAP_ENABLE | SDMA_RPTR_WRITEBACK_SWAP_ENABLE; +#endif + WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl); + + /* Initialize the ring buffer's read and write pointers */ + WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0); + WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); + + /* set the wb address whether it's enabled or not */ + WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], + upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF); + WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i], + ((adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC)); + + rb_cntl |= SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK; + + WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8); + WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40); + + ring->wptr = 0; + WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2); + + /* enable DMA RB */ + WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], + rb_cntl | SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK); + + ib_cntl = SDMA0_GFX_IB_CNTL__IB_ENABLE_MASK; +#ifdef __BIG_ENDIAN + ib_cntl |= SDMA0_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK; +#endif + /* enable DMA IBs */ + WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); + + ring->ready = true; + + r = amdgpu_ring_test_ring(ring); + if (r) { + ring->ready = false; + return r; + } + + if (adev->mman.buffer_funcs_ring == ring) + amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size); + } + + return 0; +} + +/** + * cik_sdma_rlc_resume - setup and start the async dma engines + * + * @adev: amdgpu_device pointer + * + * Set up the compute DMA queues and enable them (CIK). + * Returns 0 for success, error for failure. 
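For the ring programming in cik_sdma_gfx_resume() above, a worked example with the 256 KB gfx rings allocated in cik_sdma_sw_init(): ring_size / 4 = 65536 dwords, so rb_bufsz = order_base_2(65536) = 16 and rb_cntl starts out as 16 << 1 = 0x20 before the writeback-enable and RB_ENABLE bits are ORed in; the ring base is programmed 256-byte aligned via gpu_addr >> 8, with the upper bits going to SDMA0_GFX_RB_BASE_HI via gpu_addr >> 40.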
+ */ +static int cik_sdma_rlc_resume(struct amdgpu_device *adev) +{ + /* XXX todo */ + return 0; +} + +/** + * cik_sdma_load_microcode - load the sDMA ME ucode + * + * @adev: amdgpu_device pointer + * + * Loads the sDMA0/1 ucode. + * Returns 0 for success, -EINVAL if the ucode is not available. + */ +static int cik_sdma_load_microcode(struct amdgpu_device *adev) +{ + const struct sdma_firmware_header_v1_0 *hdr; + const __le32 *fw_data; + u32 fw_size; + int i, j; + + if (!adev->sdma[0].fw || !adev->sdma[1].fw) + return -EINVAL; + + /* halt the MEs */ + cik_sdma_enable(adev, false); + + for (i = 0; i < SDMA_MAX_INSTANCE; i++) { + hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data; + amdgpu_ucode_print_sdma_hdr(&hdr->header); + fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; + adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version); + fw_data = (const __le32 *) + (adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0); + for (j = 0; j < fw_size; j++) + WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++)); + WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma[i].fw_version); + } + + return 0; +} + +/** + * cik_sdma_start - setup and start the async dma engines + * + * @adev: amdgpu_device pointer + * + * Set up the DMA engines and enable them (CIK). + * Returns 0 for success, error for failure. + */ +static int cik_sdma_start(struct amdgpu_device *adev) +{ + int r; + + r = cik_sdma_load_microcode(adev); + if (r) + return r; + + /* unhalt the MEs */ + cik_sdma_enable(adev, true); + + /* start the gfx rings and rlc compute queues */ + r = cik_sdma_gfx_resume(adev); + if (r) + return r; + r = cik_sdma_rlc_resume(adev); + if (r) + return r; + + return 0; +} + +/** + * cik_sdma_ring_test_ring - simple async dma engine test + * + * @ring: amdgpu_ring structure holding ring information + * + * Test the DMA engine by writing using it to write an + * value to memory. (CIK). + * Returns 0 for success, error for failure. + */ +static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + unsigned i; + unsigned index; + int r; + u32 tmp; + u64 gpu_addr; + + r = amdgpu_wb_get(adev, &index); + if (r) { + dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); + return r; + } + + gpu_addr = adev->wb.gpu_addr + (index * 4); + tmp = 0xCAFEDEAD; + adev->wb.wb[index] = cpu_to_le32(tmp); + + r = amdgpu_ring_lock(ring, 5); + if (r) { + DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); + amdgpu_wb_free(adev, index); + return r; + } + amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0)); + amdgpu_ring_write(ring, lower_32_bits(gpu_addr)); + amdgpu_ring_write(ring, upper_32_bits(gpu_addr)); + amdgpu_ring_write(ring, 1); /* number of DWs to follow */ + amdgpu_ring_write(ring, 0xDEADBEEF); + amdgpu_ring_unlock_commit(ring); + + for (i = 0; i < adev->usec_timeout; i++) { + tmp = le32_to_cpu(adev->wb.wb[index]); + if (tmp == 0xDEADBEEF) + break; + DRM_UDELAY(1); + } + + if (i < adev->usec_timeout) { + DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i); + } else { + DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", + ring->idx, tmp); + r = -EINVAL; + } + amdgpu_wb_free(adev, index); + + return r; +} + +/** + * cik_sdma_ring_test_ib - test an IB on the DMA engine + * + * @ring: amdgpu_ring structure holding ring information + * + * Test a simple IB in the DMA ring (CIK). 
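Both the ring test above and the IB test below exercise the same 5-dword linear write; as a sketch of the packet they build:

        u32 pkt[5] = {
                SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0),
                lower_32_bits(gpu_addr),        /* destination in the writeback page */
                upper_32_bits(gpu_addr),
                1,                              /* one dword of payload follows */
                0xDEADBEEF,                     /* value the test then polls for */
        };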
+ * Returns 0 on success, error on failure. + */ +static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + struct amdgpu_ib ib; + unsigned i; + unsigned index; + int r; + u32 tmp = 0; + u64 gpu_addr; + + r = amdgpu_wb_get(adev, &index); + if (r) { + dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); + return r; + } + + gpu_addr = adev->wb.gpu_addr + (index * 4); + tmp = 0xCAFEDEAD; + adev->wb.wb[index] = cpu_to_le32(tmp); + + r = amdgpu_ib_get(ring, NULL, 256, &ib); + if (r) { + amdgpu_wb_free(adev, index); + DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); + return r; + } + + ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0); + ib.ptr[1] = lower_32_bits(gpu_addr); + ib.ptr[2] = upper_32_bits(gpu_addr); + ib.ptr[3] = 1; + ib.ptr[4] = 0xDEADBEEF; + ib.length_dw = 5; + + r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED); + if (r) { + amdgpu_ib_free(adev, &ib); + amdgpu_wb_free(adev, index); + DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r); + return r; + } + r = amdgpu_fence_wait(ib.fence, false); + if (r) { + amdgpu_ib_free(adev, &ib); + amdgpu_wb_free(adev, index); + DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); + return r; + } + for (i = 0; i < adev->usec_timeout; i++) { + tmp = le32_to_cpu(adev->wb.wb[index]); + if (tmp == 0xDEADBEEF) + break; + DRM_UDELAY(1); + } + if (i < adev->usec_timeout) { + DRM_INFO("ib test on ring %d succeeded in %u usecs\n", + ib.fence->ring->idx, i); + } else { + DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); + r = -EINVAL; + } + amdgpu_ib_free(adev, &ib); + amdgpu_wb_free(adev, index); + return r; +} + +/** + * cik_sdma_vm_copy_pages - update PTEs by copying them from the GART + * + * @ib: indirect buffer to fill with commands + * @pe: addr of the page entry + * @src: src addr to copy from + * @count: number of page entries to update + * + * Update PTEs by copying them from the GART using sDMA (CIK). + */ +static void cik_sdma_vm_copy_pte(struct amdgpu_ib *ib, + uint64_t pe, uint64_t src, + unsigned count) +{ + while (count) { + unsigned bytes = count * 8; + if (bytes > 0x1FFFF8) + bytes = 0x1FFFF8; + + ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, + SDMA_WRITE_SUB_OPCODE_LINEAR, 0); + ib->ptr[ib->length_dw++] = bytes; + ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ + ib->ptr[ib->length_dw++] = lower_32_bits(src); + ib->ptr[ib->length_dw++] = upper_32_bits(src); + ib->ptr[ib->length_dw++] = lower_32_bits(pe); + ib->ptr[ib->length_dw++] = upper_32_bits(pe); + + pe += bytes; + src += bytes; + count -= bytes / 8; + } +} + +/** + * cik_sdma_vm_write_pages - update PTEs by writing them manually + * + * @ib: indirect buffer to fill with commands + * @pe: addr of the page entry + * @addr: dst addr to write into pe + * @count: number of page entries to update + * @incr: increase next addr by incr bytes + * @flags: access flags + * + * Update PTEs by writing them manually using sDMA (CIK). 
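Both this helper and cik_sdma_vm_copy_pte() above split large updates to respect per-packet limits: a COPY packet moves at most 0x1FFFF8 bytes, i.e. 262,143 eight-byte PTEs, per 7-dword header, while the WRITE packet built below carries at most 0xFFFFE dwords (524,287 PTEs) of inline data.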
+ */ +static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib, + uint64_t pe, + uint64_t addr, unsigned count, + uint32_t incr, uint32_t flags) +{ + uint64_t value; + unsigned ndw; + + while (count) { + ndw = count * 2; + if (ndw > 0xFFFFE) + ndw = 0xFFFFE; + + /* for non-physically contiguous pages (system) */ + ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE, + SDMA_WRITE_SUB_OPCODE_LINEAR, 0); + ib->ptr[ib->length_dw++] = pe; + ib->ptr[ib->length_dw++] = upper_32_bits(pe); + ib->ptr[ib->length_dw++] = ndw; + for (; ndw > 0; ndw -= 2, --count, pe += 8) { + if (flags & AMDGPU_PTE_SYSTEM) { + value = amdgpu_vm_map_gart(ib->ring->adev, addr); + value &= 0xFFFFFFFFFFFFF000ULL; + } else if (flags & AMDGPU_PTE_VALID) { + value = addr; + } else { + value = 0; + } + addr += incr; + value |= flags; + ib->ptr[ib->length_dw++] = value; + ib->ptr[ib->length_dw++] = upper_32_bits(value); + } + } +} + +/** + * cik_sdma_vm_set_pages - update the page tables using sDMA + * + * @ib: indirect buffer to fill with commands + * @pe: addr of the page entry + * @addr: dst addr to write into pe + * @count: number of page entries to update + * @incr: increase next addr by incr bytes + * @flags: access flags + * + * Update the page tables using sDMA (CIK). + */ +static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib, + uint64_t pe, + uint64_t addr, unsigned count, + uint32_t incr, uint32_t flags) +{ + uint64_t value; + unsigned ndw; + + while (count) { + ndw = count; + if (ndw > 0x7FFFF) + ndw = 0x7FFFF; + + if (flags & AMDGPU_PTE_VALID) + value = addr; + else + value = 0; + + /* for physically contiguous pages (vram) */ + ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0); + ib->ptr[ib->length_dw++] = pe; /* dst addr */ + ib->ptr[ib->length_dw++] = upper_32_bits(pe); + ib->ptr[ib->length_dw++] = flags; /* mask */ + ib->ptr[ib->length_dw++] = 0; + ib->ptr[ib->length_dw++] = value; /* value */ + ib->ptr[ib->length_dw++] = upper_32_bits(value); + ib->ptr[ib->length_dw++] = incr; /* increment size */ + ib->ptr[ib->length_dw++] = 0; + ib->ptr[ib->length_dw++] = ndw; /* number of entries */ + + pe += ndw * 8; + addr += ndw * incr; + count -= ndw; + } +} + +/** + * cik_sdma_vm_pad_ib - pad the IB to the required number of dw + * + * @ib: indirect buffer to fill with padding + * + */ +static void cik_sdma_vm_pad_ib(struct amdgpu_ib *ib) +{ + while (ib->length_dw & 0x7) + ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0); +} + +/** + * cik_sdma_ring_emit_vm_flush - cik vm flush using sDMA + * + * @ring: amdgpu_ring pointer + * @vm: amdgpu_vm pointer + * + * Update the page table base and flush the VM TLB + * using sDMA (CIK). 
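A hedged reading of the GENERATE_PTE_PDE packet built in cik_sdma_vm_set_pte_pde() above, written as its CPU equivalent (pte is hypothetical, standing for a CPU mapping of the GPU address pe):

        for (i = 0; i < ndw; i++)
                pte[i] = (value + (u64)i * incr) | flags;   /* engine generates ndw entries, incr apart */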
+ */ +static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring, + unsigned vm_id, uint64_t pd_addr) +{ + u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) | + SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */ + + amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); + if (vm_id < 8) { + amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id)); + } else { + amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8)); + } + amdgpu_ring_write(ring, pd_addr >> 12); + + /* update SH_MEM_* regs */ + amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); + amdgpu_ring_write(ring, mmSRBM_GFX_CNTL); + amdgpu_ring_write(ring, VMID(vm_id)); + + amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); + amdgpu_ring_write(ring, mmSH_MEM_BASES); + amdgpu_ring_write(ring, 0); + + amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); + amdgpu_ring_write(ring, mmSH_MEM_CONFIG); + amdgpu_ring_write(ring, 0); + + amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); + amdgpu_ring_write(ring, mmSH_MEM_APE1_BASE); + amdgpu_ring_write(ring, 1); + + amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); + amdgpu_ring_write(ring, mmSH_MEM_APE1_LIMIT); + amdgpu_ring_write(ring, 0); + + amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); + amdgpu_ring_write(ring, mmSRBM_GFX_CNTL); + amdgpu_ring_write(ring, VMID(0)); + + /* flush TLB */ + amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); + amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST); + amdgpu_ring_write(ring, 1 << vm_id); + + amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits)); + amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, 0); /* reference */ + amdgpu_ring_write(ring, 0); /* mask */ + amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */ +} + +static void cik_enable_sdma_mgcg(struct amdgpu_device *adev, + bool enable) +{ + u32 orig, data; + + if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_SDMA_MGCG)) { + WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100); + WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100); + } else { + orig = data = RREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET); + data |= 0xff000000; + if (data != orig) + WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, data); + + orig = data = RREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET); + data |= 0xff000000; + if (data != orig) + WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, data); + } +} + +static void cik_enable_sdma_mgls(struct amdgpu_device *adev, + bool enable) +{ + u32 orig, data; + + if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_SDMA_LS)) { + orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET); + data |= 0x100; + if (orig != data) + WREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data); + + orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET); + data |= 0x100; + if (orig != data) + WREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data); + } else { + orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET); + data &= ~0x100; + if (orig != data) + WREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data); + + orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET); + data &= ~0x100; + if (orig != data) + WREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data); + } +} + +static int cik_sdma_early_init(struct amdgpu_device 
*adev) +{ + cik_sdma_set_ring_funcs(adev); + cik_sdma_set_irq_funcs(adev); + cik_sdma_set_buffer_funcs(adev); + cik_sdma_set_vm_pte_funcs(adev); + + return 0; +} + +static int cik_sdma_sw_init(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring; + int r; + + r = cik_sdma_init_microcode(adev); + if (r) { + DRM_ERROR("Failed to load sdma firmware!\n"); + return r; + } + + /* SDMA trap event */ + r = amdgpu_irq_add_id(adev, 224, &adev->sdma_trap_irq); + if (r) + return r; + + /* SDMA Privileged inst */ + r = amdgpu_irq_add_id(adev, 241, &adev->sdma_illegal_inst_irq); + if (r) + return r; + + /* SDMA Privileged inst */ + r = amdgpu_irq_add_id(adev, 247, &adev->sdma_illegal_inst_irq); + if (r) + return r; + + ring = &adev->sdma[0].ring; + ring->ring_obj = NULL; + + ring = &adev->sdma[1].ring; + ring->ring_obj = NULL; + + ring = &adev->sdma[0].ring; + sprintf(ring->name, "sdma0"); + r = amdgpu_ring_init(adev, ring, 256 * 1024, + SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf, + &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP0, + AMDGPU_RING_TYPE_SDMA); + if (r) + return r; + + ring = &adev->sdma[1].ring; + sprintf(ring->name, "sdma1"); + r = amdgpu_ring_init(adev, ring, 256 * 1024, + SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf, + &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP1, + AMDGPU_RING_TYPE_SDMA); + if (r) + return r; + + return r; +} + +static int cik_sdma_sw_fini(struct amdgpu_device *adev) +{ + amdgpu_ring_fini(&adev->sdma[0].ring); + amdgpu_ring_fini(&adev->sdma[1].ring); + + return 0; +} + +static int cik_sdma_hw_init(struct amdgpu_device *adev) +{ + int r; + + r = cik_sdma_start(adev); + if (r) + return r; + + return r; +} + +static int cik_sdma_hw_fini(struct amdgpu_device *adev) +{ + cik_sdma_enable(adev, false); + + return 0; +} + +static int cik_sdma_suspend(struct amdgpu_device *adev) +{ + + return cik_sdma_hw_fini(adev); +} + +static int cik_sdma_resume(struct amdgpu_device *adev) +{ + + return cik_sdma_hw_init(adev); +} + +static bool cik_sdma_is_idle(struct amdgpu_device *adev) +{ + u32 tmp = RREG32(mmSRBM_STATUS2); + + if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK | + SRBM_STATUS2__SDMA1_BUSY_MASK)) + return false; + + return true; +} + +static int cik_sdma_wait_for_idle(struct amdgpu_device *adev) +{ + unsigned i; + u32 tmp; + + for (i = 0; i < adev->usec_timeout; i++) { + tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK | + SRBM_STATUS2__SDMA1_BUSY_MASK); + + if (!tmp) + return 0; + udelay(1); + } + return -ETIMEDOUT; +} + +static void cik_sdma_print_status(struct amdgpu_device *adev) +{ + int i, j; + + dev_info(adev->dev, "CIK SDMA registers\n"); + dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", + RREG32(mmSRBM_STATUS2)); + for (i = 0; i < SDMA_MAX_INSTANCE; i++) { + dev_info(adev->dev, " SDMA%d_STATUS_REG=0x%08X\n", + i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i])); + dev_info(adev->dev, " SDMA%d_ME_CNTL=0x%08X\n", + i, RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i])); + dev_info(adev->dev, " SDMA%d_CNTL=0x%08X\n", + i, RREG32(mmSDMA0_CNTL + sdma_offsets[i])); + dev_info(adev->dev, " SDMA%d_SEM_INCOMPLETE_TIMER_CNTL=0x%08X\n", + i, RREG32(mmSDMA0_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i])); + dev_info(adev->dev, " SDMA%d_SEM_WAIT_FAIL_TIMER_CNTL=0x%08X\n", + i, RREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i])); + dev_info(adev->dev, " SDMA%d_GFX_IB_CNTL=0x%08X\n", + i, RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i])); + dev_info(adev->dev, " SDMA%d_GFX_RB_CNTL=0x%08X\n", + i, RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i])); + dev_info(adev->dev, " 
SDMA%d_GFX_RB_RPTR=0x%08X\n", + i, RREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i])); + dev_info(adev->dev, " SDMA%d_GFX_RB_WPTR=0x%08X\n", + i, RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i])); + dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR_ADDR_HI=0x%08X\n", + i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i])); + dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR_ADDR_LO=0x%08X\n", + i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i])); + dev_info(adev->dev, " SDMA%d_GFX_RB_BASE=0x%08X\n", + i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i])); + dev_info(adev->dev, " SDMA%d_GFX_RB_BASE_HI=0x%08X\n", + i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i])); + mutex_lock(&adev->srbm_mutex); + for (j = 0; j < 16; j++) { + cik_srbm_select(adev, 0, 0, 0, j); + dev_info(adev->dev, " VM %d:\n", j); + dev_info(adev->dev, " SDMA0_GFX_VIRTUAL_ADDR=0x%08X\n", + RREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i])); + dev_info(adev->dev, " SDMA0_GFX_APE1_CNTL=0x%08X\n", + RREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i])); + } + cik_srbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + } +} + +static int cik_sdma_soft_reset(struct amdgpu_device *adev) +{ + u32 srbm_soft_reset = 0; + u32 tmp = RREG32(mmSRBM_STATUS2); + + if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) { + /* sdma0 */ + tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET); + tmp |= SDMA0_F32_CNTL__HALT_MASK; + WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp); + srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK; + } + if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) { + /* sdma1 */ + tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET); + tmp |= SDMA0_F32_CNTL__HALT_MASK; + WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp); + srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK; + } + + if (srbm_soft_reset) { + cik_sdma_print_status(adev); + + tmp = RREG32(mmSRBM_SOFT_RESET); + tmp |= srbm_soft_reset; + dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + udelay(50); + + tmp &= ~srbm_soft_reset; + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + /* Wait a little for things to settle down */ + udelay(50); + + cik_sdma_print_status(adev); + } + + return 0; +} + +static int cik_sdma_set_trap_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned type, + enum amdgpu_interrupt_state state) +{ + u32 sdma_cntl; + + switch (type) { + case AMDGPU_SDMA_IRQ_TRAP0: + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET); + sdma_cntl &= ~SDMA0_CNTL__TRAP_ENABLE_MASK; + WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl); + break; + case AMDGPU_IRQ_STATE_ENABLE: + sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET); + sdma_cntl |= SDMA0_CNTL__TRAP_ENABLE_MASK; + WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl); + break; + default: + break; + } + break; + case AMDGPU_SDMA_IRQ_TRAP1: + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET); + sdma_cntl &= ~SDMA0_CNTL__TRAP_ENABLE_MASK; + WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl); + break; + case AMDGPU_IRQ_STATE_ENABLE: + sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET); + sdma_cntl |= SDMA0_CNTL__TRAP_ENABLE_MASK; + WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl); + break; + default: + break; + } + break; + default: + break; + } + return 0; +} + +static int cik_sdma_process_trap_irq(struct amdgpu_device *adev, + struct 
amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + u8 instance_id, queue_id; + + instance_id = (entry->ring_id & 0x3) >> 0; + queue_id = (entry->ring_id & 0xc) >> 2; + DRM_DEBUG("IH: SDMA trap\n"); + switch (instance_id) { + case 0: + switch (queue_id) { + case 0: + amdgpu_fence_process(&adev->sdma[0].ring); + break; + case 1: + /* XXX compute */ + break; + case 2: + /* XXX compute */ + break; + } + break; + case 1: + switch (queue_id) { + case 0: + amdgpu_fence_process(&adev->sdma[1].ring); + break; + case 1: + /* XXX compute */ + break; + case 2: + /* XXX compute */ + break; + } + break; + } + + return 0; +} + +static int cik_sdma_process_illegal_inst_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + DRM_ERROR("Illegal instruction in SDMA command stream\n"); + schedule_work(&adev->reset_work); + return 0; +} + +static int cik_sdma_set_clockgating_state(struct amdgpu_device *adev, + enum amdgpu_clockgating_state state) +{ + bool gate = false; + + if (state == AMDGPU_CG_STATE_GATE) + gate = true; + + cik_enable_sdma_mgcg(adev, gate); + cik_enable_sdma_mgls(adev, gate); + + return 0; +} + +static int cik_sdma_set_powergating_state(struct amdgpu_device *adev, + enum amdgpu_powergating_state state) +{ + return 0; +} + +const struct amdgpu_ip_funcs cik_sdma_ip_funcs = { + .early_init = cik_sdma_early_init, + .late_init = NULL, + .sw_init = cik_sdma_sw_init, + .sw_fini = cik_sdma_sw_fini, + .hw_init = cik_sdma_hw_init, + .hw_fini = cik_sdma_hw_fini, + .suspend = cik_sdma_suspend, + .resume = cik_sdma_resume, + .is_idle = cik_sdma_is_idle, + .wait_for_idle = cik_sdma_wait_for_idle, + .soft_reset = cik_sdma_soft_reset, + .print_status = cik_sdma_print_status, + .set_clockgating_state = cik_sdma_set_clockgating_state, + .set_powergating_state = cik_sdma_set_powergating_state, +}; + +/** + * cik_sdma_ring_is_lockup - Check if the DMA engine is locked up + * + * @ring: amdgpu_ring structure holding ring information + * + * Check if the async DMA engine is locked up (CIK). + * Returns true if the engine appears to be locked up, false if not. 
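For the trap handler above, the instance_id/queue_id split mirrors the SDMA RINGID layout documented in cik_ih.c (INSTANCE_ID in bits [1:0], QUEUE_ID in bits [3:2]); for example a ring_id of 0x1 decodes to SDMA1's gfx queue and 0x4 to SDMA0's rlc0 compute queue, though only the gfx queues are wired up to amdgpu_fence_process() here.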
+ */ +static bool cik_sdma_ring_is_lockup(struct amdgpu_ring *ring) +{ + + if (cik_sdma_is_idle(ring->adev)) { + amdgpu_ring_lockup_update(ring); + return false; + } + return amdgpu_ring_test_lockup(ring); +} + +static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = { + .get_rptr = cik_sdma_ring_get_rptr, + .get_wptr = cik_sdma_ring_get_wptr, + .set_wptr = cik_sdma_ring_set_wptr, + .parse_cs = NULL, + .emit_ib = cik_sdma_ring_emit_ib, + .emit_fence = cik_sdma_ring_emit_fence, + .emit_semaphore = cik_sdma_ring_emit_semaphore, + .emit_vm_flush = cik_sdma_ring_emit_vm_flush, + .test_ring = cik_sdma_ring_test_ring, + .test_ib = cik_sdma_ring_test_ib, + .is_lockup = cik_sdma_ring_is_lockup, +}; + +static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev) +{ + adev->sdma[0].ring.funcs = &cik_sdma_ring_funcs; + adev->sdma[1].ring.funcs = &cik_sdma_ring_funcs; +} + +static const struct amdgpu_irq_src_funcs cik_sdma_trap_irq_funcs = { + .set = cik_sdma_set_trap_irq_state, + .process = cik_sdma_process_trap_irq, +}; + +static const struct amdgpu_irq_src_funcs cik_sdma_illegal_inst_irq_funcs = { + .process = cik_sdma_process_illegal_inst_irq, +}; + +static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->sdma_trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST; + adev->sdma_trap_irq.funcs = &cik_sdma_trap_irq_funcs; + adev->sdma_illegal_inst_irq.funcs = &cik_sdma_illegal_inst_irq_funcs; +} + +/** + * cik_sdma_emit_copy_buffer - copy buffer using the sDMA engine + * + * @ring: amdgpu_ring structure holding ring information + * @src_offset: src GPU address + * @dst_offset: dst GPU address + * @byte_count: number of bytes to xfer + * + * Copy GPU buffers using the DMA engine (CIK). + * Used by the amdgpu ttm implementation to move pages if + * registered as the asic copy callback. + */ +static void cik_sdma_emit_copy_buffer(struct amdgpu_ring *ring, + uint64_t src_offset, + uint64_t dst_offset, + uint32_t byte_count) +{ + amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0)); + amdgpu_ring_write(ring, byte_count); + amdgpu_ring_write(ring, 0); /* src/dst endian swap */ + amdgpu_ring_write(ring, lower_32_bits(src_offset)); + amdgpu_ring_write(ring, upper_32_bits(src_offset)); + amdgpu_ring_write(ring, lower_32_bits(dst_offset)); + amdgpu_ring_write(ring, upper_32_bits(dst_offset)); +} + +/** + * cik_sdma_emit_fill_buffer - fill buffer using the sDMA engine + * + * @ring: amdgpu_ring structure holding ring information + * @src_data: value to write to buffer + * @dst_offset: dst GPU address + * @byte_count: number of bytes to xfer + * + * Fill GPU buffers using the DMA engine (CIK). 
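The dword counts of these emitters are what cik_sdma_buffer_funcs below advertises (copy_num_dw = 7, fill_num_dw = 5) so callers can reserve ring space; with copy_max_bytes = 0x1fffff, an 8 MiB buffer move, assuming the generic mover splits strictly at that limit, needs ceil(8388608 / 2097151) = 5 copy packets, i.e. 35 dwords on the ring.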
+ */ +static void cik_sdma_emit_fill_buffer(struct amdgpu_ring *ring, + uint32_t src_data, + uint64_t dst_offset, + uint32_t byte_count) +{ + amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_CONSTANT_FILL, 0, 0)); + amdgpu_ring_write(ring, lower_32_bits(dst_offset)); + amdgpu_ring_write(ring, upper_32_bits(dst_offset)); + amdgpu_ring_write(ring, src_data); + amdgpu_ring_write(ring, byte_count); +} + +static const struct amdgpu_buffer_funcs cik_sdma_buffer_funcs = { + .copy_max_bytes = 0x1fffff, + .copy_num_dw = 7, + .emit_copy_buffer = cik_sdma_emit_copy_buffer, + + .fill_max_bytes = 0x1fffff, + .fill_num_dw = 5, + .emit_fill_buffer = cik_sdma_emit_fill_buffer, +}; + +static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev) +{ + if (adev->mman.buffer_funcs == NULL) { + adev->mman.buffer_funcs = &cik_sdma_buffer_funcs; + adev->mman.buffer_funcs_ring = &adev->sdma[0].ring; + } +} + +static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = { + .copy_pte = cik_sdma_vm_copy_pte, + .write_pte = cik_sdma_vm_write_pte, + .set_pte_pde = cik_sdma_vm_set_pte_pde, + .pad_ib = cik_sdma_vm_pad_ib, +}; + +static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev) +{ + if (adev->vm_manager.vm_pte_funcs == NULL) { + adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs; + adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring; + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.h b/drivers/gpu/drm/amd/amdgpu/cik_sdma.h new file mode 100644 index 000000000000..42b59960bc53 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.h @@ -0,0 +1,29 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __CIK_SDMA_H__ +#define __CIK_SDMA_H__ + +extern const struct amdgpu_ip_funcs cik_sdma_ip_funcs; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/clearstate_ci.h b/drivers/gpu/drm/amd/amdgpu/clearstate_ci.h new file mode 100644 index 000000000000..c3982f9475fb --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/clearstate_ci.h @@ -0,0 +1,944 @@ +/* + * Copyright 2013 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +static const unsigned int ci_SECT_CONTEXT_def_1[] = +{ + 0x00000000, // DB_RENDER_CONTROL + 0x00000000, // DB_COUNT_CONTROL + 0x00000000, // DB_DEPTH_VIEW + 0x00000000, // DB_RENDER_OVERRIDE + 0x00000000, // DB_RENDER_OVERRIDE2 + 0x00000000, // DB_HTILE_DATA_BASE + 0, // HOLE + 0, // HOLE + 0x00000000, // DB_DEPTH_BOUNDS_MIN + 0x00000000, // DB_DEPTH_BOUNDS_MAX + 0x00000000, // DB_STENCIL_CLEAR + 0x00000000, // DB_DEPTH_CLEAR + 0x00000000, // PA_SC_SCREEN_SCISSOR_TL + 0x40004000, // PA_SC_SCREEN_SCISSOR_BR + 0, // HOLE + 0x00000000, // DB_DEPTH_INFO + 0x00000000, // DB_Z_INFO + 0x00000000, // DB_STENCIL_INFO + 0x00000000, // DB_Z_READ_BASE + 0x00000000, // DB_STENCIL_READ_BASE + 0x00000000, // DB_Z_WRITE_BASE + 0x00000000, // DB_STENCIL_WRITE_BASE + 0x00000000, // DB_DEPTH_SIZE + 0x00000000, // DB_DEPTH_SLICE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0x00000000, // TA_BC_BASE_ADDR + 0x00000000, // TA_BC_BASE_ADDR_HI + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0x00000000, // COHER_DEST_BASE_HI_0 + 0x00000000, // COHER_DEST_BASE_HI_1 + 0x00000000, // COHER_DEST_BASE_HI_2 + 0x00000000, // COHER_DEST_BASE_HI_3 + 0x00000000, // COHER_DEST_BASE_2 + 0x00000000, // COHER_DEST_BASE_3 + 0x00000000, // PA_SC_WINDOW_OFFSET + 0x80000000, // PA_SC_WINDOW_SCISSOR_TL + 
0x40004000, // PA_SC_WINDOW_SCISSOR_BR + 0x0000ffff, // PA_SC_CLIPRECT_RULE + 0x00000000, // PA_SC_CLIPRECT_0_TL + 0x40004000, // PA_SC_CLIPRECT_0_BR + 0x00000000, // PA_SC_CLIPRECT_1_TL + 0x40004000, // PA_SC_CLIPRECT_1_BR + 0x00000000, // PA_SC_CLIPRECT_2_TL + 0x40004000, // PA_SC_CLIPRECT_2_BR + 0x00000000, // PA_SC_CLIPRECT_3_TL + 0x40004000, // PA_SC_CLIPRECT_3_BR + 0xaa99aaaa, // PA_SC_EDGERULE + 0x00000000, // PA_SU_HARDWARE_SCREEN_OFFSET + 0xffffffff, // CB_TARGET_MASK + 0xffffffff, // CB_SHADER_MASK + 0x80000000, // PA_SC_GENERIC_SCISSOR_TL + 0x40004000, // PA_SC_GENERIC_SCISSOR_BR + 0x00000000, // COHER_DEST_BASE_0 + 0x00000000, // COHER_DEST_BASE_1 + 0x80000000, // PA_SC_VPORT_SCISSOR_0_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_0_BR + 0x80000000, // PA_SC_VPORT_SCISSOR_1_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_1_BR + 0x80000000, // PA_SC_VPORT_SCISSOR_2_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_2_BR + 0x80000000, // PA_SC_VPORT_SCISSOR_3_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_3_BR + 0x80000000, // PA_SC_VPORT_SCISSOR_4_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_4_BR + 0x80000000, // PA_SC_VPORT_SCISSOR_5_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_5_BR + 0x80000000, // PA_SC_VPORT_SCISSOR_6_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_6_BR + 0x80000000, // PA_SC_VPORT_SCISSOR_7_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_7_BR + 0x80000000, // PA_SC_VPORT_SCISSOR_8_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_8_BR + 0x80000000, // PA_SC_VPORT_SCISSOR_9_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_9_BR + 0x80000000, // PA_SC_VPORT_SCISSOR_10_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_10_BR + 0x80000000, // PA_SC_VPORT_SCISSOR_11_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_11_BR + 0x80000000, // PA_SC_VPORT_SCISSOR_12_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_12_BR + 0x80000000, // PA_SC_VPORT_SCISSOR_13_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_13_BR + 0x80000000, // PA_SC_VPORT_SCISSOR_14_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_14_BR + 0x80000000, // PA_SC_VPORT_SCISSOR_15_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_15_BR + 0x00000000, // PA_SC_VPORT_ZMIN_0 + 0x3f800000, // PA_SC_VPORT_ZMAX_0 + 0x00000000, // PA_SC_VPORT_ZMIN_1 + 0x3f800000, // PA_SC_VPORT_ZMAX_1 + 0x00000000, // PA_SC_VPORT_ZMIN_2 + 0x3f800000, // PA_SC_VPORT_ZMAX_2 + 0x00000000, // PA_SC_VPORT_ZMIN_3 + 0x3f800000, // PA_SC_VPORT_ZMAX_3 + 0x00000000, // PA_SC_VPORT_ZMIN_4 + 0x3f800000, // PA_SC_VPORT_ZMAX_4 + 0x00000000, // PA_SC_VPORT_ZMIN_5 + 0x3f800000, // PA_SC_VPORT_ZMAX_5 + 0x00000000, // PA_SC_VPORT_ZMIN_6 + 0x3f800000, // PA_SC_VPORT_ZMAX_6 + 0x00000000, // PA_SC_VPORT_ZMIN_7 + 0x3f800000, // PA_SC_VPORT_ZMAX_7 + 0x00000000, // PA_SC_VPORT_ZMIN_8 + 0x3f800000, // PA_SC_VPORT_ZMAX_8 + 0x00000000, // PA_SC_VPORT_ZMIN_9 + 0x3f800000, // PA_SC_VPORT_ZMAX_9 + 0x00000000, // PA_SC_VPORT_ZMIN_10 + 0x3f800000, // PA_SC_VPORT_ZMAX_10 + 0x00000000, // PA_SC_VPORT_ZMIN_11 + 0x3f800000, // PA_SC_VPORT_ZMAX_11 + 0x00000000, // PA_SC_VPORT_ZMIN_12 + 0x3f800000, // PA_SC_VPORT_ZMAX_12 + 0x00000000, // PA_SC_VPORT_ZMIN_13 + 0x3f800000, // PA_SC_VPORT_ZMAX_13 + 0x00000000, // PA_SC_VPORT_ZMIN_14 + 0x3f800000, // PA_SC_VPORT_ZMAX_14 + 0x00000000, // PA_SC_VPORT_ZMIN_15 + 0x3f800000, // PA_SC_VPORT_ZMAX_15 +}; +static const unsigned int ci_SECT_CONTEXT_def_2[] = +{ + 0x00000000, // PA_SC_SCREEN_EXTENT_CONTROL + 0, // HOLE + 0x00000000, // CP_PERFMON_CNTX_CNTL + 0x00000000, // CP_RINGID + 0x00000000, // CP_VMID + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 
0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0xffffffff, // VGT_MAX_VTX_INDX + 0x00000000, // VGT_MIN_VTX_INDX + 0x00000000, // VGT_INDX_OFFSET + 0x00000000, // VGT_MULTI_PRIM_IB_RESET_INDX + 0, // HOLE + 0x00000000, // CB_BLEND_RED + 0x00000000, // CB_BLEND_GREEN + 0x00000000, // CB_BLEND_BLUE + 0x00000000, // CB_BLEND_ALPHA + 0, // HOLE + 0, // HOLE + 0x00000000, // DB_STENCIL_CONTROL + 0x00000000, // DB_STENCILREFMASK + 0x00000000, // DB_STENCILREFMASK_BF + 0, // HOLE + 0x00000000, // PA_CL_VPORT_XSCALE + 0x00000000, // PA_CL_VPORT_XOFFSET + 0x00000000, // PA_CL_VPORT_YSCALE + 0x00000000, // PA_CL_VPORT_YOFFSET + 0x00000000, // PA_CL_VPORT_ZSCALE + 0x00000000, // PA_CL_VPORT_ZOFFSET + 0x00000000, // PA_CL_VPORT_XSCALE_1 + 0x00000000, // PA_CL_VPORT_XOFFSET_1 + 0x00000000, // PA_CL_VPORT_YSCALE_1 + 0x00000000, // PA_CL_VPORT_YOFFSET_1 + 0x00000000, // PA_CL_VPORT_ZSCALE_1 + 0x00000000, // PA_CL_VPORT_ZOFFSET_1 + 0x00000000, // PA_CL_VPORT_XSCALE_2 + 0x00000000, // PA_CL_VPORT_XOFFSET_2 + 0x00000000, // PA_CL_VPORT_YSCALE_2 + 0x00000000, // PA_CL_VPORT_YOFFSET_2 + 0x00000000, // PA_CL_VPORT_ZSCALE_2 + 0x00000000, // PA_CL_VPORT_ZOFFSET_2 + 0x00000000, // PA_CL_VPORT_XSCALE_3 + 0x00000000, // PA_CL_VPORT_XOFFSET_3 + 0x00000000, // PA_CL_VPORT_YSCALE_3 + 0x00000000, // PA_CL_VPORT_YOFFSET_3 + 0x00000000, // PA_CL_VPORT_ZSCALE_3 + 0x00000000, // PA_CL_VPORT_ZOFFSET_3 + 0x00000000, // PA_CL_VPORT_XSCALE_4 + 0x00000000, // PA_CL_VPORT_XOFFSET_4 + 0x00000000, // PA_CL_VPORT_YSCALE_4 + 0x00000000, // PA_CL_VPORT_YOFFSET_4 + 0x00000000, // PA_CL_VPORT_ZSCALE_4 + 0x00000000, // PA_CL_VPORT_ZOFFSET_4 + 0x00000000, // PA_CL_VPORT_XSCALE_5 + 0x00000000, // PA_CL_VPORT_XOFFSET_5 + 0x00000000, // PA_CL_VPORT_YSCALE_5 + 0x00000000, // PA_CL_VPORT_YOFFSET_5 + 0x00000000, // PA_CL_VPORT_ZSCALE_5 + 0x00000000, // PA_CL_VPORT_ZOFFSET_5 + 0x00000000, // PA_CL_VPORT_XSCALE_6 + 0x00000000, // PA_CL_VPORT_XOFFSET_6 + 0x00000000, // PA_CL_VPORT_YSCALE_6 + 0x00000000, // PA_CL_VPORT_YOFFSET_6 + 0x00000000, // PA_CL_VPORT_ZSCALE_6 + 0x00000000, // PA_CL_VPORT_ZOFFSET_6 + 0x00000000, // PA_CL_VPORT_XSCALE_7 + 0x00000000, // PA_CL_VPORT_XOFFSET_7 + 0x00000000, // PA_CL_VPORT_YSCALE_7 + 0x00000000, // PA_CL_VPORT_YOFFSET_7 + 0x00000000, // PA_CL_VPORT_ZSCALE_7 + 0x00000000, // PA_CL_VPORT_ZOFFSET_7 + 0x00000000, // PA_CL_VPORT_XSCALE_8 + 0x00000000, // PA_CL_VPORT_XOFFSET_8 + 0x00000000, // PA_CL_VPORT_YSCALE_8 + 0x00000000, // PA_CL_VPORT_YOFFSET_8 + 0x00000000, // PA_CL_VPORT_ZSCALE_8 + 0x00000000, // PA_CL_VPORT_ZOFFSET_8 + 0x00000000, // PA_CL_VPORT_XSCALE_9 + 0x00000000, // PA_CL_VPORT_XOFFSET_9 + 0x00000000, // PA_CL_VPORT_YSCALE_9 + 0x00000000, // PA_CL_VPORT_YOFFSET_9 + 0x00000000, // PA_CL_VPORT_ZSCALE_9 + 0x00000000, // PA_CL_VPORT_ZOFFSET_9 + 0x00000000, // PA_CL_VPORT_XSCALE_10 + 0x00000000, // PA_CL_VPORT_XOFFSET_10 + 0x00000000, // PA_CL_VPORT_YSCALE_10 + 0x00000000, // PA_CL_VPORT_YOFFSET_10 + 0x00000000, // PA_CL_VPORT_ZSCALE_10 + 0x00000000, // PA_CL_VPORT_ZOFFSET_10 + 0x00000000, // PA_CL_VPORT_XSCALE_11 + 0x00000000, // PA_CL_VPORT_XOFFSET_11 + 0x00000000, // PA_CL_VPORT_YSCALE_11 + 0x00000000, // PA_CL_VPORT_YOFFSET_11 + 0x00000000, // PA_CL_VPORT_ZSCALE_11 + 0x00000000, // PA_CL_VPORT_ZOFFSET_11 + 0x00000000, // 
PA_CL_VPORT_XSCALE_12 + 0x00000000, // PA_CL_VPORT_XOFFSET_12 + 0x00000000, // PA_CL_VPORT_YSCALE_12 + 0x00000000, // PA_CL_VPORT_YOFFSET_12 + 0x00000000, // PA_CL_VPORT_ZSCALE_12 + 0x00000000, // PA_CL_VPORT_ZOFFSET_12 + 0x00000000, // PA_CL_VPORT_XSCALE_13 + 0x00000000, // PA_CL_VPORT_XOFFSET_13 + 0x00000000, // PA_CL_VPORT_YSCALE_13 + 0x00000000, // PA_CL_VPORT_YOFFSET_13 + 0x00000000, // PA_CL_VPORT_ZSCALE_13 + 0x00000000, // PA_CL_VPORT_ZOFFSET_13 + 0x00000000, // PA_CL_VPORT_XSCALE_14 + 0x00000000, // PA_CL_VPORT_XOFFSET_14 + 0x00000000, // PA_CL_VPORT_YSCALE_14 + 0x00000000, // PA_CL_VPORT_YOFFSET_14 + 0x00000000, // PA_CL_VPORT_ZSCALE_14 + 0x00000000, // PA_CL_VPORT_ZOFFSET_14 + 0x00000000, // PA_CL_VPORT_XSCALE_15 + 0x00000000, // PA_CL_VPORT_XOFFSET_15 + 0x00000000, // PA_CL_VPORT_YSCALE_15 + 0x00000000, // PA_CL_VPORT_YOFFSET_15 + 0x00000000, // PA_CL_VPORT_ZSCALE_15 + 0x00000000, // PA_CL_VPORT_ZOFFSET_15 + 0x00000000, // PA_CL_UCP_0_X + 0x00000000, // PA_CL_UCP_0_Y + 0x00000000, // PA_CL_UCP_0_Z + 0x00000000, // PA_CL_UCP_0_W + 0x00000000, // PA_CL_UCP_1_X + 0x00000000, // PA_CL_UCP_1_Y + 0x00000000, // PA_CL_UCP_1_Z + 0x00000000, // PA_CL_UCP_1_W + 0x00000000, // PA_CL_UCP_2_X + 0x00000000, // PA_CL_UCP_2_Y + 0x00000000, // PA_CL_UCP_2_Z + 0x00000000, // PA_CL_UCP_2_W + 0x00000000, // PA_CL_UCP_3_X + 0x00000000, // PA_CL_UCP_3_Y + 0x00000000, // PA_CL_UCP_3_Z + 0x00000000, // PA_CL_UCP_3_W + 0x00000000, // PA_CL_UCP_4_X + 0x00000000, // PA_CL_UCP_4_Y + 0x00000000, // PA_CL_UCP_4_Z + 0x00000000, // PA_CL_UCP_4_W + 0x00000000, // PA_CL_UCP_5_X + 0x00000000, // PA_CL_UCP_5_Y + 0x00000000, // PA_CL_UCP_5_Z + 0x00000000, // PA_CL_UCP_5_W + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0x00000000, // SPI_PS_INPUT_CNTL_0 + 0x00000000, // SPI_PS_INPUT_CNTL_1 + 0x00000000, // SPI_PS_INPUT_CNTL_2 + 0x00000000, // SPI_PS_INPUT_CNTL_3 + 0x00000000, // SPI_PS_INPUT_CNTL_4 + 0x00000000, // SPI_PS_INPUT_CNTL_5 + 0x00000000, // SPI_PS_INPUT_CNTL_6 + 0x00000000, // SPI_PS_INPUT_CNTL_7 + 0x00000000, // SPI_PS_INPUT_CNTL_8 + 0x00000000, // SPI_PS_INPUT_CNTL_9 + 0x00000000, // SPI_PS_INPUT_CNTL_10 + 0x00000000, // SPI_PS_INPUT_CNTL_11 + 0x00000000, // SPI_PS_INPUT_CNTL_12 + 0x00000000, // SPI_PS_INPUT_CNTL_13 + 0x00000000, // SPI_PS_INPUT_CNTL_14 + 0x00000000, // SPI_PS_INPUT_CNTL_15 + 0x00000000, // SPI_PS_INPUT_CNTL_16 + 0x00000000, // SPI_PS_INPUT_CNTL_17 + 0x00000000, // SPI_PS_INPUT_CNTL_18 + 0x00000000, // SPI_PS_INPUT_CNTL_19 + 0x00000000, // SPI_PS_INPUT_CNTL_20 + 0x00000000, // SPI_PS_INPUT_CNTL_21 + 0x00000000, // SPI_PS_INPUT_CNTL_22 + 0x00000000, // SPI_PS_INPUT_CNTL_23 + 0x00000000, // SPI_PS_INPUT_CNTL_24 + 0x00000000, // SPI_PS_INPUT_CNTL_25 + 0x00000000, // SPI_PS_INPUT_CNTL_26 + 0x00000000, // SPI_PS_INPUT_CNTL_27 + 0x00000000, // SPI_PS_INPUT_CNTL_28 + 0x00000000, // SPI_PS_INPUT_CNTL_29 + 0x00000000, // SPI_PS_INPUT_CNTL_30 + 0x00000000, // SPI_PS_INPUT_CNTL_31 + 0x00000000, // SPI_VS_OUT_CONFIG + 0, // HOLE + 0x00000000, // SPI_PS_INPUT_ENA + 0x00000000, // SPI_PS_INPUT_ADDR + 0x00000000, // SPI_INTERP_CONTROL_0 + 0x00000002, // SPI_PS_IN_CONTROL + 0, // HOLE + 0x00000000, // SPI_BARYC_CNTL + 0, // HOLE + 0x00000000, // SPI_TMPRING_SIZE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0x00000000, // SPI_SHADER_POS_FORMAT + 0x00000000, // SPI_SHADER_Z_FORMAT + 0x00000000, // SPI_SHADER_COL_FORMAT + 0, // HOLE + 0, // HOLE + 0, // HOLE + 
0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0x00000000, // CB_BLEND0_CONTROL + 0x00000000, // CB_BLEND1_CONTROL + 0x00000000, // CB_BLEND2_CONTROL + 0x00000000, // CB_BLEND3_CONTROL + 0x00000000, // CB_BLEND4_CONTROL + 0x00000000, // CB_BLEND5_CONTROL + 0x00000000, // CB_BLEND6_CONTROL + 0x00000000, // CB_BLEND7_CONTROL +}; +static const unsigned int ci_SECT_CONTEXT_def_3[] = +{ + 0x00000000, // PA_CL_POINT_X_RAD + 0x00000000, // PA_CL_POINT_Y_RAD + 0x00000000, // PA_CL_POINT_SIZE + 0x00000000, // PA_CL_POINT_CULL_RAD + 0x00000000, // VGT_DMA_BASE_HI + 0x00000000, // VGT_DMA_BASE +}; +static const unsigned int ci_SECT_CONTEXT_def_4[] = +{ + 0x00000000, // DB_DEPTH_CONTROL + 0x00000000, // DB_EQAA + 0x00000000, // CB_COLOR_CONTROL + 0x00000000, // DB_SHADER_CONTROL + 0x00090000, // PA_CL_CLIP_CNTL + 0x00000004, // PA_SU_SC_MODE_CNTL + 0x00000000, // PA_CL_VTE_CNTL + 0x00000000, // PA_CL_VS_OUT_CNTL + 0x00000000, // PA_CL_NANINF_CNTL + 0x00000000, // PA_SU_LINE_STIPPLE_CNTL + 0x00000000, // PA_SU_LINE_STIPPLE_SCALE + 0x00000000, // PA_SU_PRIM_FILTER_CNTL + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0x00000000, // PA_SU_POINT_SIZE + 0x00000000, // PA_SU_POINT_MINMAX + 0x00000000, // PA_SU_LINE_CNTL + 0x00000000, // PA_SC_LINE_STIPPLE + 0x00000000, // VGT_OUTPUT_PATH_CNTL + 0x00000000, // VGT_HOS_CNTL + 0x00000000, // VGT_HOS_MAX_TESS_LEVEL + 0x00000000, // VGT_HOS_MIN_TESS_LEVEL + 0x00000000, // VGT_HOS_REUSE_DEPTH + 0x00000000, // VGT_GROUP_PRIM_TYPE + 0x00000000, // VGT_GROUP_FIRST_DECR + 0x00000000, // VGT_GROUP_DECR + 0x00000000, // VGT_GROUP_VECT_0_CNTL + 0x00000000, // VGT_GROUP_VECT_1_CNTL + 0x00000000, // VGT_GROUP_VECT_0_FMT_CNTL + 0x00000000, // VGT_GROUP_VECT_1_FMT_CNTL + 0x00000000, // VGT_GS_MODE + 0x00000000, // VGT_GS_ONCHIP_CNTL + 0x00000000, // PA_SC_MODE_CNTL_0 + 0x00000000, // 
PA_SC_MODE_CNTL_1 + 0x00000000, // VGT_ENHANCE + 0x00000100, // VGT_GS_PER_ES + 0x00000080, // VGT_ES_PER_GS + 0x00000002, // VGT_GS_PER_VS + 0x00000000, // VGT_GSVS_RING_OFFSET_1 + 0x00000000, // VGT_GSVS_RING_OFFSET_2 + 0x00000000, // VGT_GSVS_RING_OFFSET_3 + 0x00000000, // VGT_GS_OUT_PRIM_TYPE + 0x00000000, // IA_ENHANCE +}; +static const unsigned int ci_SECT_CONTEXT_def_5[] = +{ + 0x00000000, // WD_ENHANCE + 0x00000000, // VGT_PRIMITIVEID_EN +}; +static const unsigned int ci_SECT_CONTEXT_def_6[] = +{ + 0x00000000, // VGT_PRIMITIVEID_RESET +}; +static const unsigned int ci_SECT_CONTEXT_def_7[] = +{ + 0x00000000, // VGT_MULTI_PRIM_IB_RESET_EN + 0, // HOLE + 0, // HOLE + 0x00000000, // VGT_INSTANCE_STEP_RATE_0 + 0x00000000, // VGT_INSTANCE_STEP_RATE_1 + 0x000000ff, // IA_MULTI_VGT_PARAM + 0x00000000, // VGT_ESGS_RING_ITEMSIZE + 0x00000000, // VGT_GSVS_RING_ITEMSIZE + 0x00000000, // VGT_REUSE_OFF + 0x00000000, // VGT_VTX_CNT_EN + 0x00000000, // DB_HTILE_SURFACE + 0x00000000, // DB_SRESULTS_COMPARE_STATE0 + 0x00000000, // DB_SRESULTS_COMPARE_STATE1 + 0x00000000, // DB_PRELOAD_CONTROL + 0, // HOLE + 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_0 + 0x00000000, // VGT_STRMOUT_VTX_STRIDE_0 + 0, // HOLE + 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_0 + 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_1 + 0x00000000, // VGT_STRMOUT_VTX_STRIDE_1 + 0, // HOLE + 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_1 + 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_2 + 0x00000000, // VGT_STRMOUT_VTX_STRIDE_2 + 0, // HOLE + 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_2 + 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_3 + 0x00000000, // VGT_STRMOUT_VTX_STRIDE_3 + 0, // HOLE + 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_3 + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_OFFSET + 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE + 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE + 0, // HOLE + 0x00000000, // VGT_GS_MAX_VERT_OUT + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0x00000000, // VGT_SHADER_STAGES_EN + 0x00000000, // VGT_LS_HS_CONFIG + 0x00000000, // VGT_GS_VERT_ITEMSIZE + 0x00000000, // VGT_GS_VERT_ITEMSIZE_1 + 0x00000000, // VGT_GS_VERT_ITEMSIZE_2 + 0x00000000, // VGT_GS_VERT_ITEMSIZE_3 + 0x00000000, // VGT_TF_PARAM + 0x00000000, // DB_ALPHA_TO_MASK + 0, // HOLE + 0x00000000, // PA_SU_POLY_OFFSET_DB_FMT_CNTL + 0x00000000, // PA_SU_POLY_OFFSET_CLAMP + 0x00000000, // PA_SU_POLY_OFFSET_FRONT_SCALE + 0x00000000, // PA_SU_POLY_OFFSET_FRONT_OFFSET + 0x00000000, // PA_SU_POLY_OFFSET_BACK_SCALE + 0x00000000, // PA_SU_POLY_OFFSET_BACK_OFFSET + 0x00000000, // VGT_GS_INSTANCE_CNT + 0x00000000, // VGT_STRMOUT_CONFIG + 0x00000000, // VGT_STRMOUT_BUFFER_CONFIG + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0x00000000, // PA_SC_CENTROID_PRIORITY_0 + 0x00000000, // PA_SC_CENTROID_PRIORITY_1 + 0x00001000, // PA_SC_LINE_CNTL + 0x00000000, // PA_SC_AA_CONFIG + 0x00000005, // PA_SU_VTX_CNTL + 0x3f800000, // PA_CL_GB_VERT_CLIP_ADJ + 0x3f800000, // PA_CL_GB_VERT_DISC_ADJ + 0x3f800000, // PA_CL_GB_HORZ_CLIP_ADJ + 0x3f800000, // PA_CL_GB_HORZ_DISC_ADJ + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1 + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2 + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3 + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0 + 0x00000000, // 
PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1 + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2 + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3 + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0 + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1 + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2 + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3 + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0 + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1 + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2 + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3 + 0xffffffff, // PA_SC_AA_MASK_X0Y0_X1Y0 + 0xffffffff, // PA_SC_AA_MASK_X0Y1_X1Y1 + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0x0000000e, // VGT_VERTEX_REUSE_BLOCK_CNTL + 0x00000010, // VGT_OUT_DEALLOC_CNTL + 0x00000000, // CB_COLOR0_BASE + 0x00000000, // CB_COLOR0_PITCH + 0x00000000, // CB_COLOR0_SLICE + 0x00000000, // CB_COLOR0_VIEW + 0x00000000, // CB_COLOR0_INFO + 0x00000000, // CB_COLOR0_ATTRIB + 0, // HOLE + 0x00000000, // CB_COLOR0_CMASK + 0x00000000, // CB_COLOR0_CMASK_SLICE + 0x00000000, // CB_COLOR0_FMASK + 0x00000000, // CB_COLOR0_FMASK_SLICE + 0x00000000, // CB_COLOR0_CLEAR_WORD0 + 0x00000000, // CB_COLOR0_CLEAR_WORD1 + 0, // HOLE + 0, // HOLE + 0x00000000, // CB_COLOR1_BASE + 0x00000000, // CB_COLOR1_PITCH + 0x00000000, // CB_COLOR1_SLICE + 0x00000000, // CB_COLOR1_VIEW + 0x00000000, // CB_COLOR1_INFO + 0x00000000, // CB_COLOR1_ATTRIB + 0, // HOLE + 0x00000000, // CB_COLOR1_CMASK + 0x00000000, // CB_COLOR1_CMASK_SLICE + 0x00000000, // CB_COLOR1_FMASK + 0x00000000, // CB_COLOR1_FMASK_SLICE + 0x00000000, // CB_COLOR1_CLEAR_WORD0 + 0x00000000, // CB_COLOR1_CLEAR_WORD1 + 0, // HOLE + 0, // HOLE + 0x00000000, // CB_COLOR2_BASE + 0x00000000, // CB_COLOR2_PITCH + 0x00000000, // CB_COLOR2_SLICE + 0x00000000, // CB_COLOR2_VIEW + 0x00000000, // CB_COLOR2_INFO + 0x00000000, // CB_COLOR2_ATTRIB + 0, // HOLE + 0x00000000, // CB_COLOR2_CMASK + 0x00000000, // CB_COLOR2_CMASK_SLICE + 0x00000000, // CB_COLOR2_FMASK + 0x00000000, // CB_COLOR2_FMASK_SLICE + 0x00000000, // CB_COLOR2_CLEAR_WORD0 + 0x00000000, // CB_COLOR2_CLEAR_WORD1 + 0, // HOLE + 0, // HOLE + 0x00000000, // CB_COLOR3_BASE + 0x00000000, // CB_COLOR3_PITCH + 0x00000000, // CB_COLOR3_SLICE + 0x00000000, // CB_COLOR3_VIEW + 0x00000000, // CB_COLOR3_INFO + 0x00000000, // CB_COLOR3_ATTRIB + 0, // HOLE + 0x00000000, // CB_COLOR3_CMASK + 0x00000000, // CB_COLOR3_CMASK_SLICE + 0x00000000, // CB_COLOR3_FMASK + 0x00000000, // CB_COLOR3_FMASK_SLICE + 0x00000000, // CB_COLOR3_CLEAR_WORD0 + 0x00000000, // CB_COLOR3_CLEAR_WORD1 + 0, // HOLE + 0, // HOLE + 0x00000000, // CB_COLOR4_BASE + 0x00000000, // CB_COLOR4_PITCH + 0x00000000, // CB_COLOR4_SLICE + 0x00000000, // CB_COLOR4_VIEW + 0x00000000, // CB_COLOR4_INFO + 0x00000000, // CB_COLOR4_ATTRIB + 0, // HOLE + 0x00000000, // CB_COLOR4_CMASK + 0x00000000, // CB_COLOR4_CMASK_SLICE + 0x00000000, // CB_COLOR4_FMASK + 0x00000000, // CB_COLOR4_FMASK_SLICE + 0x00000000, // CB_COLOR4_CLEAR_WORD0 + 0x00000000, // CB_COLOR4_CLEAR_WORD1 + 0, // HOLE + 0, // HOLE + 0x00000000, // CB_COLOR5_BASE + 0x00000000, // CB_COLOR5_PITCH + 0x00000000, // CB_COLOR5_SLICE + 0x00000000, // CB_COLOR5_VIEW + 0x00000000, // CB_COLOR5_INFO + 0x00000000, // CB_COLOR5_ATTRIB + 0, // HOLE + 0x00000000, // CB_COLOR5_CMASK + 0x00000000, // CB_COLOR5_CMASK_SLICE + 0x00000000, // CB_COLOR5_FMASK + 0x00000000, // CB_COLOR5_FMASK_SLICE + 0x00000000, // CB_COLOR5_CLEAR_WORD0 + 0x00000000, // CB_COLOR5_CLEAR_WORD1 + 0, // HOLE + 0, // HOLE + 0x00000000, // 
CB_COLOR6_BASE + 0x00000000, // CB_COLOR6_PITCH + 0x00000000, // CB_COLOR6_SLICE + 0x00000000, // CB_COLOR6_VIEW + 0x00000000, // CB_COLOR6_INFO + 0x00000000, // CB_COLOR6_ATTRIB + 0, // HOLE + 0x00000000, // CB_COLOR6_CMASK + 0x00000000, // CB_COLOR6_CMASK_SLICE + 0x00000000, // CB_COLOR6_FMASK + 0x00000000, // CB_COLOR6_FMASK_SLICE + 0x00000000, // CB_COLOR6_CLEAR_WORD0 + 0x00000000, // CB_COLOR6_CLEAR_WORD1 + 0, // HOLE + 0, // HOLE + 0x00000000, // CB_COLOR7_BASE + 0x00000000, // CB_COLOR7_PITCH + 0x00000000, // CB_COLOR7_SLICE + 0x00000000, // CB_COLOR7_VIEW + 0x00000000, // CB_COLOR7_INFO + 0x00000000, // CB_COLOR7_ATTRIB + 0, // HOLE + 0x00000000, // CB_COLOR7_CMASK + 0x00000000, // CB_COLOR7_CMASK_SLICE + 0x00000000, // CB_COLOR7_FMASK + 0x00000000, // CB_COLOR7_FMASK_SLICE + 0x00000000, // CB_COLOR7_CLEAR_WORD0 + 0x00000000, // CB_COLOR7_CLEAR_WORD1 +}; +static const struct cs_extent_def ci_SECT_CONTEXT_defs[] = +{ + {ci_SECT_CONTEXT_def_1, 0x0000a000, 212 }, + {ci_SECT_CONTEXT_def_2, 0x0000a0d6, 274 }, + {ci_SECT_CONTEXT_def_3, 0x0000a1f5, 6 }, + {ci_SECT_CONTEXT_def_4, 0x0000a200, 157 }, + {ci_SECT_CONTEXT_def_5, 0x0000a2a0, 2 }, + {ci_SECT_CONTEXT_def_6, 0x0000a2a3, 1 }, + {ci_SECT_CONTEXT_def_7, 0x0000a2a5, 233 }, + { 0, 0, 0 } +}; +static const struct cs_section_def ci_cs_data[] = { + { ci_SECT_CONTEXT_defs, SECT_CONTEXT }, + { 0, SECT_NONE } +}; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c new file mode 100644 index 000000000000..c1bc6935c88e --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -0,0 +1,3830 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
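As a minimal illustration (a sketch, not part of the patch): each entry of ci_SECT_CONTEXT_defs above pairs one ci_SECT_CONTEXT_def_N table with the register offset it starts at and the number of dwords it provides, and a zeroed entry terminates the list. The field names in the sketch are assumed for demonstration purposes; the real struct cs_extent_def lives in the shared clearstate headers.

#include <stddef.h>
#include <stdio.h>

/* Assumed shape of one extent entry: {value table, first register offset,
 * number of dwords}, mirroring the {array, 0x0000aXXX, count} initializers
 * above; the field names here are illustrative only. */
struct demo_cs_extent {
	const unsigned int *values;
	unsigned int reg_offset;
	unsigned int reg_count;
};

static unsigned int demo_total_defaults(const struct demo_cs_extent *defs)
{
	unsigned int total = 0;

	/* a null value table terminates the list, like { 0, 0, 0 } above */
	for (; defs->values; defs++)
		total += defs->reg_count;
	return total;
}

int main(void)
{
	static const unsigned int def_a[] = { 0x00000000, 0x40004000 };
	static const unsigned int def_b[] = { 0x0000ffff };
	static const struct demo_cs_extent demo_defs[] = {
		{ def_a, 0x0000a000, 2 },
		{ def_b, 0x0000a0d6, 1 },
		{ NULL, 0, 0 },
	};

	printf("%u context defaults\n", demo_total_defaults(demo_defs));
	return 0;
}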
+ * + */ +#include "drmP.h" +#include "amdgpu.h" +#include "amdgpu_pm.h" +#include "amdgpu_i2c.h" +#include "cikd.h" +#include "atom.h" +#include "amdgpu_atombios.h" +#include "atombios_crtc.h" +#include "atombios_encoders.h" +#include "amdgpu_pll.h" +#include "amdgpu_connectors.h" + +#include "dce/dce_8_0_d.h" +#include "dce/dce_8_0_sh_mask.h" + +#include "gca/gfx_7_2_enum.h" + +#include "gmc/gmc_7_1_d.h" +#include "gmc/gmc_7_1_sh_mask.h" + +#include "oss/oss_2_0_d.h" +#include "oss/oss_2_0_sh_mask.h" + +static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev); +static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev); + +static const u32 crtc_offsets[6] = +{ + CRTC0_REGISTER_OFFSET, + CRTC1_REGISTER_OFFSET, + CRTC2_REGISTER_OFFSET, + CRTC3_REGISTER_OFFSET, + CRTC4_REGISTER_OFFSET, + CRTC5_REGISTER_OFFSET +}; + +static const uint32_t dig_offsets[] = { + CRTC0_REGISTER_OFFSET, + CRTC1_REGISTER_OFFSET, + CRTC2_REGISTER_OFFSET, + CRTC3_REGISTER_OFFSET, + CRTC4_REGISTER_OFFSET, + CRTC5_REGISTER_OFFSET, + (0x13830 - 0x7030) >> 2, +}; + +static const struct { + uint32_t reg; + uint32_t vblank; + uint32_t vline; + uint32_t hpd; + +} interrupt_status_offsets[6] = { { + .reg = mmDISP_INTERRUPT_STATUS, + .vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK, + .vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK, + .hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK +}, { + .reg = mmDISP_INTERRUPT_STATUS_CONTINUE, + .vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK, + .vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK, + .hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK +}, { + .reg = mmDISP_INTERRUPT_STATUS_CONTINUE2, + .vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK, + .vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK, + .hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK +}, { + .reg = mmDISP_INTERRUPT_STATUS_CONTINUE3, + .vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK, + .vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK, + .hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK +}, { + .reg = mmDISP_INTERRUPT_STATUS_CONTINUE4, + .vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK, + .vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK, + .hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK +}, { + .reg = mmDISP_INTERRUPT_STATUS_CONTINUE5, + .vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK, + .vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK, + .hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK +} }; + +static const uint32_t hpd_int_control_offsets[6] = { + mmDC_HPD1_INT_CONTROL, + mmDC_HPD2_INT_CONTROL, + mmDC_HPD3_INT_CONTROL, + mmDC_HPD4_INT_CONTROL, + mmDC_HPD5_INT_CONTROL, + mmDC_HPD6_INT_CONTROL, +}; + +static u32 dce_v8_0_audio_endpt_rreg(struct amdgpu_device *adev, + u32 block_offset, u32 reg) +{ + unsigned long flags; + u32 r; + + spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags); + WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg); + r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset); + spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags); + + return r; +} + +static void dce_v8_0_audio_endpt_wreg(struct amdgpu_device *adev, + u32 block_offset, u32 reg, u32 v) +{ + unsigned long flags; + + spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags); + WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX 
+ block_offset, reg); + WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v); + spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags); +} + +static bool dce_v8_0_is_in_vblank(struct amdgpu_device *adev, int crtc) +{ + if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) & + CRTC_V_BLANK_START_END__CRTC_V_BLANK_START_MASK) + return true; + else + return false; +} + +static bool dce_v8_0_is_counter_moving(struct amdgpu_device *adev, int crtc) +{ + u32 pos1, pos2; + + pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]); + pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]); + + if (pos1 != pos2) + return true; + else + return false; +} + +/** + * dce_v8_0_vblank_wait - vblank wait asic callback. + * + * @adev: amdgpu_device pointer + * @crtc: crtc to wait for vblank on + * + * Wait for vblank on the requested crtc (evergreen+). + */ +static void dce_v8_0_vblank_wait(struct amdgpu_device *adev, int crtc) +{ + unsigned i = 0; + + if (crtc >= adev->mode_info.num_crtc) + return; + + if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK)) + return; + + /* depending on when we hit vblank, we may be close to active; if so, + * wait for another frame. + */ + while (dce_v8_0_is_in_vblank(adev, crtc)) { + if (i++ % 100 == 0) { + if (!dce_v8_0_is_counter_moving(adev, crtc)) + break; + } + } + + while (!dce_v8_0_is_in_vblank(adev, crtc)) { + if (i++ % 100 == 0) { + if (!dce_v8_0_is_counter_moving(adev, crtc)) + break; + } + } +} + +static u32 dce_v8_0_vblank_get_counter(struct amdgpu_device *adev, int crtc) +{ + if (crtc >= adev->mode_info.num_crtc) + return 0; + else + return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]); +} + +/** + * dce_v8_0_page_flip - pageflip callback. + * + * @adev: amdgpu_device pointer + * @crtc_id: crtc to cleanup pageflip on + * @crtc_base: new address of the crtc (GPU MC address) + * + * Does the actual pageflip (evergreen+). + * During vblank we take the crtc lock and wait for the update_pending + * bit to go high, when it does, we release the lock, and allow the + * double buffered update to take place. + * Returns the current update pending status. + */ +static void dce_v8_0_page_flip(struct amdgpu_device *adev, + int crtc_id, u64 crtc_base) +{ + struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; + u32 tmp = RREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset); + int i; + + /* Lock the graphics update lock */ + tmp |= GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK; + WREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset, tmp); + + /* update the scanout addresses */ + WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, + upper_32_bits(crtc_base)); + WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, + (u32)crtc_base); + + WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, + upper_32_bits(crtc_base)); + WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, + (u32)crtc_base); + + /* Wait for update_pending to go high. */ + for (i = 0; i < adev->usec_timeout; i++) { + if (RREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset) & + GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING_MASK) + break; + udelay(1); + } + DRM_DEBUG("Update pending now high. 
Unlocking vupdate_lock.\n"); + + /* Unlock the lock, so double-buffering can take place inside vblank */ + tmp &= ~GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK; + WREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset, tmp); +} + +static int dce_v8_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, + u32 *vbl, u32 *position) +{ + if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc)) + return -EINVAL; + + *vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]); + *position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]); + + return 0; +} + +/** + * dce_v8_0_hpd_sense - hpd sense callback. + * + * @adev: amdgpu_device pointer + * @hpd: hpd (hotplug detect) pin + * + * Checks if a digital monitor is connected (evergreen+). + * Returns true if connected, false if not connected. + */ +static bool dce_v8_0_hpd_sense(struct amdgpu_device *adev, + enum amdgpu_hpd_id hpd) +{ + bool connected = false; + + switch (hpd) { + case AMDGPU_HPD_1: + if (RREG32(mmDC_HPD1_INT_STATUS) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK) + connected = true; + break; + case AMDGPU_HPD_2: + if (RREG32(mmDC_HPD2_INT_STATUS) & DC_HPD2_INT_STATUS__DC_HPD2_SENSE_MASK) + connected = true; + break; + case AMDGPU_HPD_3: + if (RREG32(mmDC_HPD3_INT_STATUS) & DC_HPD3_INT_STATUS__DC_HPD3_SENSE_MASK) + connected = true; + break; + case AMDGPU_HPD_4: + if (RREG32(mmDC_HPD4_INT_STATUS) & DC_HPD4_INT_STATUS__DC_HPD4_SENSE_MASK) + connected = true; + break; + case AMDGPU_HPD_5: + if (RREG32(mmDC_HPD5_INT_STATUS) & DC_HPD5_INT_STATUS__DC_HPD5_SENSE_MASK) + connected = true; + break; + case AMDGPU_HPD_6: + if (RREG32(mmDC_HPD6_INT_STATUS) & DC_HPD6_INT_STATUS__DC_HPD6_SENSE_MASK) + connected = true; + break; + default: + break; + } + + return connected; +} + +/** + * dce_v8_0_hpd_set_polarity - hpd set polarity callback. + * + * @adev: amdgpu_device pointer + * @hpd: hpd (hotplug detect) pin + * + * Set the polarity of the hpd pin (evergreen+). 
+ */ +static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev, + enum amdgpu_hpd_id hpd) +{ + u32 tmp; + bool connected = dce_v8_0_hpd_sense(adev, hpd); + + switch (hpd) { + case AMDGPU_HPD_1: + tmp = RREG32(mmDC_HPD1_INT_CONTROL); + if (connected) + tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK; + else + tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK; + WREG32(mmDC_HPD1_INT_CONTROL, tmp); + break; + case AMDGPU_HPD_2: + tmp = RREG32(mmDC_HPD2_INT_CONTROL); + if (connected) + tmp &= ~DC_HPD2_INT_CONTROL__DC_HPD2_INT_POLARITY_MASK; + else + tmp |= DC_HPD2_INT_CONTROL__DC_HPD2_INT_POLARITY_MASK; + WREG32(mmDC_HPD2_INT_CONTROL, tmp); + break; + case AMDGPU_HPD_3: + tmp = RREG32(mmDC_HPD3_INT_CONTROL); + if (connected) + tmp &= ~DC_HPD3_INT_CONTROL__DC_HPD3_INT_POLARITY_MASK; + else + tmp |= DC_HPD3_INT_CONTROL__DC_HPD3_INT_POLARITY_MASK; + WREG32(mmDC_HPD3_INT_CONTROL, tmp); + break; + case AMDGPU_HPD_4: + tmp = RREG32(mmDC_HPD4_INT_CONTROL); + if (connected) + tmp &= ~DC_HPD4_INT_CONTROL__DC_HPD4_INT_POLARITY_MASK; + else + tmp |= DC_HPD4_INT_CONTROL__DC_HPD4_INT_POLARITY_MASK; + WREG32(mmDC_HPD4_INT_CONTROL, tmp); + break; + case AMDGPU_HPD_5: + tmp = RREG32(mmDC_HPD5_INT_CONTROL); + if (connected) + tmp &= ~DC_HPD5_INT_CONTROL__DC_HPD5_INT_POLARITY_MASK; + else + tmp |= DC_HPD5_INT_CONTROL__DC_HPD5_INT_POLARITY_MASK; + WREG32(mmDC_HPD5_INT_CONTROL, tmp); + break; + case AMDGPU_HPD_6: + tmp = RREG32(mmDC_HPD6_INT_CONTROL); + if (connected) + tmp &= ~DC_HPD6_INT_CONTROL__DC_HPD6_INT_POLARITY_MASK; + else + tmp |= DC_HPD6_INT_CONTROL__DC_HPD6_INT_POLARITY_MASK; + WREG32(mmDC_HPD6_INT_CONTROL, tmp); + break; + default: + break; + } +} + +/** + * dce_v8_0_hpd_init - hpd setup callback. + * + * @adev: amdgpu_device pointer + * + * Setup the hpd pins used by the card (evergreen+). + * Enable the pin, set the polarity, and enable the hpd interrupts. + */ +static void dce_v8_0_hpd_init(struct amdgpu_device *adev) +{ + struct drm_device *dev = adev->ddev; + struct drm_connector *connector; + u32 tmp = (0x9c4 << DC_HPD1_CONTROL__DC_HPD1_CONNECTION_TIMER__SHIFT) | + (0xfa << DC_HPD1_CONTROL__DC_HPD1_RX_INT_TIMER__SHIFT) | + DC_HPD1_CONTROL__DC_HPD1_EN_MASK; + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); + + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || + connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { + /* don't try to enable hpd on eDP or LVDS avoid breaking the + * aux dp channel on imac and help (but not completely fix) + * https://bugzilla.redhat.com/show_bug.cgi?id=726143 + * also avoid interrupt storms during dpms. + */ + continue; + } + switch (amdgpu_connector->hpd.hpd) { + case AMDGPU_HPD_1: + WREG32(mmDC_HPD1_CONTROL, tmp); + break; + case AMDGPU_HPD_2: + WREG32(mmDC_HPD2_CONTROL, tmp); + break; + case AMDGPU_HPD_3: + WREG32(mmDC_HPD3_CONTROL, tmp); + break; + case AMDGPU_HPD_4: + WREG32(mmDC_HPD4_CONTROL, tmp); + break; + case AMDGPU_HPD_5: + WREG32(mmDC_HPD5_CONTROL, tmp); + break; + case AMDGPU_HPD_6: + WREG32(mmDC_HPD6_CONTROL, tmp); + break; + default: + break; + } + dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd); + amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); + } +} + +/** + * dce_v8_0_hpd_fini - hpd tear down callback. + * + * @adev: amdgpu_device pointer + * + * Tear down the hpd pins used by the card (evergreen+). + * Disable the hpd interrupts. 
+ */ +static void dce_v8_0_hpd_fini(struct amdgpu_device *adev) +{ + struct drm_device *dev = adev->ddev; + struct drm_connector *connector; + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); + + switch (amdgpu_connector->hpd.hpd) { + case AMDGPU_HPD_1: + WREG32(mmDC_HPD1_CONTROL, 0); + break; + case AMDGPU_HPD_2: + WREG32(mmDC_HPD2_CONTROL, 0); + break; + case AMDGPU_HPD_3: + WREG32(mmDC_HPD3_CONTROL, 0); + break; + case AMDGPU_HPD_4: + WREG32(mmDC_HPD4_CONTROL, 0); + break; + case AMDGPU_HPD_5: + WREG32(mmDC_HPD5_CONTROL, 0); + break; + case AMDGPU_HPD_6: + WREG32(mmDC_HPD6_CONTROL, 0); + break; + default: + break; + } + amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); + } +} + +static u32 dce_v8_0_hpd_get_gpio_reg(struct amdgpu_device *adev) +{ + return mmDC_GPIO_HPD_A; +} + +static bool dce_v8_0_is_display_hung(struct amdgpu_device *adev) +{ + u32 crtc_hung = 0; + u32 crtc_status[6]; + u32 i, j, tmp; + + for (i = 0; i < adev->mode_info.num_crtc; i++) { + if (RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK) { + crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]); + crtc_hung |= (1 << i); + } + } + + for (j = 0; j < 10; j++) { + for (i = 0; i < adev->mode_info.num_crtc; i++) { + if (crtc_hung & (1 << i)) { + tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]); + if (tmp != crtc_status[i]) + crtc_hung &= ~(1 << i); + } + } + if (crtc_hung == 0) + return false; + udelay(100); + } + + return true; +} + +static void dce_v8_0_stop_mc_access(struct amdgpu_device *adev, + struct amdgpu_mode_mc_save *save) +{ + u32 crtc_enabled, tmp; + int i; + + save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL); + save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL); + + /* disable VGA render */ + tmp = RREG32(mmVGA_RENDER_CONTROL); + tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); + WREG32(mmVGA_RENDER_CONTROL, tmp); + + /* blank the display controllers */ + for (i = 0; i < adev->mode_info.num_crtc; i++) { + crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]), + CRTC_CONTROL, CRTC_MASTER_EN); + if (crtc_enabled) { +#if 0 + u32 frame_count; + int j; + + save->crtc_enabled[i] = true; + tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); + if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) { + amdgpu_display_vblank_wait(adev, i); + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); + tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1); + WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); + } + /* wait for the next frame */ + frame_count = amdgpu_display_vblank_get_counter(adev, i); + for (j = 0; j < adev->usec_timeout; j++) { + if (amdgpu_display_vblank_get_counter(adev, i) != frame_count) + break; + udelay(1); + } + tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); + if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK) == 0) { + tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1); + WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp); + } + tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]); + if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK) == 0) { + tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 1); + WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp); + } +#else + /* XXX this is a hack to avoid strange behavior with EFI on certain systems */ + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 
1); + tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]); + tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0); + WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp); + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); + save->crtc_enabled[i] = false; + /* ***** */ +#endif + } else { + save->crtc_enabled[i] = false; + } + } +} + +static void dce_v8_0_resume_mc_access(struct amdgpu_device *adev, + struct amdgpu_mode_mc_save *save) +{ + u32 tmp, frame_count; + int i, j; + + /* update crtc base addresses */ + for (i = 0; i < adev->mode_info.num_crtc; i++) { + WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], + upper_32_bits(adev->mc.vram_start)); + WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], + upper_32_bits(adev->mc.vram_start)); + WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i], + (u32)adev->mc.vram_start); + WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i], + (u32)adev->mc.vram_start); + + if (save->crtc_enabled[i]) { + tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]); + if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 3) { + tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 3); + WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp); + } + tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); + if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK)) { + tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0); + WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp); + } + tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]); + if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK)) { + tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 0); + WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp); + } + for (j = 0; j < adev->usec_timeout; j++) { + tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); + if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING) == 0) + break; + udelay(1); + } + tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); + tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0); + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); + WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); + /* wait for the next frame */ + frame_count = amdgpu_display_vblank_get_counter(adev, i); + for (j = 0; j < adev->usec_timeout; j++) { + if (amdgpu_display_vblank_get_counter(adev, i) != frame_count) + break; + udelay(1); + } + } + } + + WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start)); + WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start)); + + /* Unlock vga access */ + WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control); + mdelay(1); + WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control); +} + +static void dce_v8_0_set_vga_render_state(struct amdgpu_device *adev, + bool render) +{ + u32 tmp; + + /* Lockout access through VGA aperture*/ + tmp = RREG32(mmVGA_HDP_CONTROL); + if (render) + tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0); + else + tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1); + WREG32(mmVGA_HDP_CONTROL, tmp); + + /* disable VGA render */ + tmp = RREG32(mmVGA_RENDER_CONTROL); + if (render) + tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1); + else + tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); + WREG32(mmVGA_RENDER_CONTROL, tmp); +} + +static void dce_v8_0_program_fmt(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_encoder 
*amdgpu_encoder = to_amdgpu_encoder(encoder); + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); + struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); + int bpc = 0; + u32 tmp = 0; + enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE; + + if (connector) { + struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); + bpc = amdgpu_connector_get_monitor_bpc(connector); + dither = amdgpu_connector->dither; + } + + /* LVDS/eDP FMT is set up by atom */ + if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT) + return; + + /* not needed for analog */ + if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) || + (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2)) + return; + + if (bpc == 0) + return; + + switch (bpc) { + case 6: + if (dither == AMDGPU_FMT_DITHER_ENABLE) + /* XXX sort out optimal dither settings */ + tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK | + FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK | + FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK | + (0 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT)); + else + tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK | + (0 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT)); + break; + case 8: + if (dither == AMDGPU_FMT_DITHER_ENABLE) + /* XXX sort out optimal dither settings */ + tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK | + FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK | + FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK | + FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK | + (1 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT)); + else + tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK | + (1 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT)); + break; + case 10: + if (dither == AMDGPU_FMT_DITHER_ENABLE) + /* XXX sort out optimal dither settings */ + tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK | + FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK | + FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK | + FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK | + (2 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT)); + else + tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK | + (2 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT)); + break; + default: + /* not needed */ + break; + } + + WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp); +} + + +/* display watermark setup */ +/** + * dce_v8_0_line_buffer_adjust - Set up the line buffer + * + * @adev: amdgpu_device pointer + * @amdgpu_crtc: the selected display controller + * @mode: the current display mode on the selected display + * controller + * + * Setup up the line buffer allocation for + * the selected display controller (CIK). + * Returns the line buffer size in pixels. + */ +static u32 dce_v8_0_line_buffer_adjust(struct amdgpu_device *adev, + struct amdgpu_crtc *amdgpu_crtc, + struct drm_display_mode *mode) +{ + u32 tmp, buffer_alloc, i; + u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8; + /* + * Line Buffer Setup + * There are 6 line buffers, one for each display controllers. + * There are 3 partitions per LB. Select the number of partitions + * to enable based on the display width. For display widths larger + * than 4096, you need use to use 2 display controllers and combine + * them using the stereo blender. 
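The selection described in the comment above is implemented immediately below; as a table-style restatement (a sketch only, not part of the patch, and assuming a discrete rather than APU part for the buffer allocation), the mapping from crtc_hdisplay to the LB_MEMORY_CONFIG value, the DMIF buffer count and the returned line-buffer width in pixels looks like this:

#include <stdbool.h>
#include <stdio.h>

struct demo_lb_choice {
	unsigned int lb_config;     /* value for LB_MEMORY_CONFIG */
	unsigned int buffer_alloc;  /* DMIF buffers allocated to the pipe */
	unsigned int lb_pixels;     /* line buffer size returned, in pixels */
};

static struct demo_lb_choice demo_pick_lb(bool crtc_enabled, unsigned int hdisplay)
{
	struct demo_lb_choice c;

	if (!crtc_enabled) {
		c.lb_config = 1;
		c.buffer_alloc = 0;
		c.lb_pixels = 0;		/* controller off: no LB used */
	} else if (hdisplay < 1920) {
		c.lb_config = 1;
		c.buffer_alloc = 2;
		c.lb_pixels = 1920 * 2;
	} else if (hdisplay < 2560) {
		c.lb_config = 2;
		c.buffer_alloc = 2;
		c.lb_pixels = 2560 * 2;
	} else {
		/* covers < 4096 and the "Mode too big for LB" fallback alike */
		c.lb_config = 0;
		c.buffer_alloc = 4;		/* assumed dGPU; an APU uses 2 */
		c.lb_pixels = 4096 * 2;
	}
	return c;
}

int main(void)
{
	static const unsigned int widths[] = { 1366, 1920, 2560, 3840, 5120 };

	for (unsigned int i = 0; i < sizeof(widths) / sizeof(widths[0]); i++) {
		struct demo_lb_choice c = demo_pick_lb(true, widths[i]);

		printf("hdisplay %4u -> config %u, %u buffers, %u px\n",
		       widths[i], c.lb_config, c.buffer_alloc, c.lb_pixels);
	}
	return 0;
}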
+ */ + if (amdgpu_crtc->base.enabled && mode) { + if (mode->crtc_hdisplay < 1920) { + tmp = 1; + buffer_alloc = 2; + } else if (mode->crtc_hdisplay < 2560) { + tmp = 2; + buffer_alloc = 2; + } else if (mode->crtc_hdisplay < 4096) { + tmp = 0; + buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4; + } else { + DRM_DEBUG_KMS("Mode too big for LB!\n"); + tmp = 0; + buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4; + } + } else { + tmp = 1; + buffer_alloc = 0; + } + + WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset, + (tmp << LB_MEMORY_CTRL__LB_MEMORY_CONFIG__SHIFT) | + (0x6B0 << LB_MEMORY_CTRL__LB_MEMORY_SIZE__SHIFT)); + + WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset, + (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT)); + for (i = 0; i < adev->usec_timeout; i++) { + if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) & + PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK) + break; + udelay(1); + } + + if (amdgpu_crtc->base.enabled && mode) { + switch (tmp) { + case 0: + default: + return 4096 * 2; + case 1: + return 1920 * 2; + case 2: + return 2560 * 2; + } + } + + /* controller not enabled, so no lb used */ + return 0; +} + +/** + * cik_get_number_of_dram_channels - get the number of dram channels + * + * @adev: amdgpu_device pointer + * + * Look up the number of video ram channels (CIK). + * Used for display watermark bandwidth calculations + * Returns the number of dram channels + */ +static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev) +{ + u32 tmp = RREG32(mmMC_SHARED_CHMAP); + + switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) { + case 0: + default: + return 1; + case 1: + return 2; + case 2: + return 4; + case 3: + return 8; + case 4: + return 3; + case 5: + return 6; + case 6: + return 10; + case 7: + return 12; + case 8: + return 16; + } +} + +struct dce8_wm_params { + u32 dram_channels; /* number of dram channels */ + u32 yclk; /* bandwidth per dram data pin in kHz */ + u32 sclk; /* engine clock in kHz */ + u32 disp_clk; /* display clock in kHz */ + u32 src_width; /* viewport width */ + u32 active_time; /* active display time in ns */ + u32 blank_time; /* blank time in ns */ + bool interlaced; /* mode is interlaced */ + fixed20_12 vsc; /* vertical scale ratio */ + u32 num_heads; /* number of active crtcs */ + u32 bytes_per_pixel; /* bytes per pixel display + overlay */ + u32 lb_size; /* line buffer allocated to pipe */ + u32 vtaps; /* vertical scaler taps */ +}; + +/** + * dce_v8_0_dram_bandwidth - get the dram bandwidth + * + * @wm: watermark calculation data + * + * Calculate the raw dram bandwidth (CIK). 
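dce_v8_0_dram_bandwidth(), which follows, evaluates (yclk / 1000) * (dram_channels * 4) * 0.7 entirely in fixed20_12 arithmetic because kernel code avoids floating point. As a readability aid only (a sketch, not part of the patch), the same product in plain doubles:

#include <stdio.h>

/* Plain floating-point restatement of the fixed20_12 math in
 * dce_v8_0_dram_bandwidth(); inputs follow dce8_wm_params above
 * (yclk in kHz, dram_channels as counted by
 * cik_get_number_of_dram_channels()). */
static double demo_dram_bandwidth(double yclk_khz, unsigned int dram_channels)
{
	double yclk = yclk_khz / 1000.0;          /* the dfixed_div(yclk, 1000) step */
	double channels = dram_channels * 4.0;    /* dfixed_const(wm->dram_channels * 4) */
	double efficiency = 7.0 / 10.0;           /* dram_efficiency = 0.7 */

	return channels * yclk * efficiency;      /* dfixed_trunc()ed in the driver */
}

int main(void)
{
	/* illustrative numbers only: a hypothetical 1 GHz memory clock, 4 channels */
	printf("raw dram bandwidth ~ %.0f MBytes/s\n",
	       demo_dram_bandwidth(1000000.0, 4));
	return 0;
}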
+ * Used for display watermark bandwidth calculations + * Returns the dram bandwidth in MBytes/s + */ +static u32 dce_v8_0_dram_bandwidth(struct dce8_wm_params *wm) +{ + /* Calculate raw DRAM Bandwidth */ + fixed20_12 dram_efficiency; /* 0.7 */ + fixed20_12 yclk, dram_channels, bandwidth; + fixed20_12 a; + + a.full = dfixed_const(1000); + yclk.full = dfixed_const(wm->yclk); + yclk.full = dfixed_div(yclk, a); + dram_channels.full = dfixed_const(wm->dram_channels * 4); + a.full = dfixed_const(10); + dram_efficiency.full = dfixed_const(7); + dram_efficiency.full = dfixed_div(dram_efficiency, a); + bandwidth.full = dfixed_mul(dram_channels, yclk); + bandwidth.full = dfixed_mul(bandwidth, dram_efficiency); + + return dfixed_trunc(bandwidth); +} + +/** + * dce_v8_0_dram_bandwidth_for_display - get the dram bandwidth for display + * + * @wm: watermark calculation data + * + * Calculate the dram bandwidth used for display (CIK). + * Used for display watermark bandwidth calculations + * Returns the dram bandwidth for display in MBytes/s + */ +static u32 dce_v8_0_dram_bandwidth_for_display(struct dce8_wm_params *wm) +{ + /* Calculate DRAM Bandwidth and the part allocated to display. */ + fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */ + fixed20_12 yclk, dram_channels, bandwidth; + fixed20_12 a; + + a.full = dfixed_const(1000); + yclk.full = dfixed_const(wm->yclk); + yclk.full = dfixed_div(yclk, a); + dram_channels.full = dfixed_const(wm->dram_channels * 4); + a.full = dfixed_const(10); + disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */ + disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a); + bandwidth.full = dfixed_mul(dram_channels, yclk); + bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation); + + return dfixed_trunc(bandwidth); +} + +/** + * dce_v8_0_data_return_bandwidth - get the data return bandwidth + * + * @wm: watermark calculation data + * + * Calculate the data return bandwidth used for display (CIK). + * Used for display watermark bandwidth calculations + * Returns the data return bandwidth in MBytes/s + */ +static u32 dce_v8_0_data_return_bandwidth(struct dce8_wm_params *wm) +{ + /* Calculate the display Data return Bandwidth */ + fixed20_12 return_efficiency; /* 0.8 */ + fixed20_12 sclk, bandwidth; + fixed20_12 a; + + a.full = dfixed_const(1000); + sclk.full = dfixed_const(wm->sclk); + sclk.full = dfixed_div(sclk, a); + a.full = dfixed_const(10); + return_efficiency.full = dfixed_const(8); + return_efficiency.full = dfixed_div(return_efficiency, a); + a.full = dfixed_const(32); + bandwidth.full = dfixed_mul(a, sclk); + bandwidth.full = dfixed_mul(bandwidth, return_efficiency); + + return dfixed_trunc(bandwidth); +} + +/** + * dce_v8_0_dmif_request_bandwidth - get the dmif bandwidth + * + * @wm: watermark calculation data + * + * Calculate the dmif bandwidth used for display (CIK). 
+ * Used for display watermark bandwidth calculations + * Returns the dmif bandwidth in MBytes/s + */ +static u32 dce_v8_0_dmif_request_bandwidth(struct dce8_wm_params *wm) +{ + /* Calculate the DMIF Request Bandwidth */ + fixed20_12 disp_clk_request_efficiency; /* 0.8 */ + fixed20_12 disp_clk, bandwidth; + fixed20_12 a, b; + + a.full = dfixed_const(1000); + disp_clk.full = dfixed_const(wm->disp_clk); + disp_clk.full = dfixed_div(disp_clk, a); + a.full = dfixed_const(32); + b.full = dfixed_mul(a, disp_clk); + + a.full = dfixed_const(10); + disp_clk_request_efficiency.full = dfixed_const(8); + disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a); + + bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency); + + return dfixed_trunc(bandwidth); +} + +/** + * dce_v8_0_available_bandwidth - get the min available bandwidth + * + * @wm: watermark calculation data + * + * Calculate the min available bandwidth used for display (CIK). + * Used for display watermark bandwidth calculations + * Returns the min available bandwidth in MBytes/s + */ +static u32 dce_v8_0_available_bandwidth(struct dce8_wm_params *wm) +{ + /* Calculate the Available bandwidth. Display can use this temporarily but not in average. */ + u32 dram_bandwidth = dce_v8_0_dram_bandwidth(wm); + u32 data_return_bandwidth = dce_v8_0_data_return_bandwidth(wm); + u32 dmif_req_bandwidth = dce_v8_0_dmif_request_bandwidth(wm); + + return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth)); +} + +/** + * dce_v8_0_average_bandwidth - get the average available bandwidth + * + * @wm: watermark calculation data + * + * Calculate the average available bandwidth used for display (CIK). + * Used for display watermark bandwidth calculations + * Returns the average available bandwidth in MBytes/s + */ +static u32 dce_v8_0_average_bandwidth(struct dce8_wm_params *wm) +{ + /* Calculate the display mode Average Bandwidth + * DisplayMode should contain the source and destination dimensions, + * timing, etc. + */ + fixed20_12 bpp; + fixed20_12 line_time; + fixed20_12 src_width; + fixed20_12 bandwidth; + fixed20_12 a; + + a.full = dfixed_const(1000); + line_time.full = dfixed_const(wm->active_time + wm->blank_time); + line_time.full = dfixed_div(line_time, a); + bpp.full = dfixed_const(wm->bytes_per_pixel); + src_width.full = dfixed_const(wm->src_width); + bandwidth.full = dfixed_mul(src_width, bpp); + bandwidth.full = dfixed_mul(bandwidth, wm->vsc); + bandwidth.full = dfixed_div(bandwidth, line_time); + + return dfixed_trunc(bandwidth); +} + +/** + * dce_v8_0_latency_watermark - get the latency watermark + * + * @wm: watermark calculation data + * + * Calculate the latency watermark (CIK). + * Used for display watermark bandwidth calculations + * Returns the latency watermark in ns + */ +static u32 dce_v8_0_latency_watermark(struct dce8_wm_params *wm) +{ + /* First calculate the latency in ns */ + u32 mc_latency = 2000; /* 2000 ns. 
*/ + u32 available_bandwidth = dce_v8_0_available_bandwidth(wm); + u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth; + u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth; + u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */ + u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) + + (wm->num_heads * cursor_line_pair_return_time); + u32 latency = mc_latency + other_heads_data_return_time + dc_latency; + u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time; + u32 tmp, dmif_size = 12288; + fixed20_12 a, b, c; + + if (wm->num_heads == 0) + return 0; + + a.full = dfixed_const(2); + b.full = dfixed_const(1); + if ((wm->vsc.full > a.full) || + ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) || + (wm->vtaps >= 5) || + ((wm->vsc.full >= a.full) && wm->interlaced)) + max_src_lines_per_dst_line = 4; + else + max_src_lines_per_dst_line = 2; + + a.full = dfixed_const(available_bandwidth); + b.full = dfixed_const(wm->num_heads); + a.full = dfixed_div(a, b); + + b.full = dfixed_const(mc_latency + 512); + c.full = dfixed_const(wm->disp_clk); + b.full = dfixed_div(b, c); + + c.full = dfixed_const(dmif_size); + b.full = dfixed_div(c, b); + + tmp = min(dfixed_trunc(a), dfixed_trunc(b)); + + b.full = dfixed_const(1000); + c.full = dfixed_const(wm->disp_clk); + b.full = dfixed_div(c, b); + c.full = dfixed_const(wm->bytes_per_pixel); + b.full = dfixed_mul(b, c); + + lb_fill_bw = min(tmp, dfixed_trunc(b)); + + a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel); + b.full = dfixed_const(1000); + c.full = dfixed_const(lb_fill_bw); + b.full = dfixed_div(c, b); + a.full = dfixed_div(a, b); + line_fill_time = dfixed_trunc(a); + + if (line_fill_time < wm->active_time) + return latency; + else + return latency + (line_fill_time - wm->active_time); + +} + +/** + * dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display - check + * average and available dram bandwidth + * + * @wm: watermark calculation data + * + * Check if the display average bandwidth fits in the display + * dram bandwidth (CIK). + * Used for display watermark bandwidth calculations + * Returns true if the display fits, false if not. + */ +static bool dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce8_wm_params *wm) +{ + if (dce_v8_0_average_bandwidth(wm) <= + (dce_v8_0_dram_bandwidth_for_display(wm) / wm->num_heads)) + return true; + else + return false; +} + +/** + * dce_v8_0_average_bandwidth_vs_available_bandwidth - check + * average and available bandwidth + * + * @wm: watermark calculation data + * + * Check if the display average bandwidth fits in the display + * available bandwidth (CIK). + * Used for display watermark bandwidth calculations + * Returns true if the display fits, false if not. + */ +static bool dce_v8_0_average_bandwidth_vs_available_bandwidth(struct dce8_wm_params *wm) +{ + if (dce_v8_0_average_bandwidth(wm) <= + (dce_v8_0_available_bandwidth(wm) / wm->num_heads)) + return true; + else + return false; +} + +/** + * dce_v8_0_check_latency_hiding - check latency hiding + * + * @wm: watermark calculation data + * + * Check latency hiding (CIK). + * Used for display watermark bandwidth calculations + * Returns true if the display fits, false if not. 
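+ *
+ * Informally, the check below passes when
+ * dce_v8_0_latency_watermark(wm) <= latency_tolerant_lines * line_time + blank_time,
+ * i.e. when the memory latency can be hidden behind the one or two
+ * extra lines the line buffer can hold ahead of the scanout position.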
+ */ +static bool dce_v8_0_check_latency_hiding(struct dce8_wm_params *wm) +{ + u32 lb_partitions = wm->lb_size / wm->src_width; + u32 line_time = wm->active_time + wm->blank_time; + u32 latency_tolerant_lines; + u32 latency_hiding; + fixed20_12 a; + + a.full = dfixed_const(1); + if (wm->vsc.full > a.full) + latency_tolerant_lines = 1; + else { + if (lb_partitions <= (wm->vtaps + 1)) + latency_tolerant_lines = 1; + else + latency_tolerant_lines = 2; + } + + latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time); + + if (dce_v8_0_latency_watermark(wm) <= latency_hiding) + return true; + else + return false; +} + +/** + * dce_v8_0_program_watermarks - program display watermarks + * + * @adev: amdgpu_device pointer + * @amdgpu_crtc: the selected display controller + * @lb_size: line buffer size + * @num_heads: number of display controllers in use + * + * Calculate and program the display watermarks for the + * selected display controller (CIK). + */ +static void dce_v8_0_program_watermarks(struct amdgpu_device *adev, + struct amdgpu_crtc *amdgpu_crtc, + u32 lb_size, u32 num_heads) +{ + struct drm_display_mode *mode = &amdgpu_crtc->base.mode; + struct dce8_wm_params wm_low, wm_high; + u32 pixel_period; + u32 line_time = 0; + u32 latency_watermark_a = 0, latency_watermark_b = 0; + u32 tmp, wm_mask; + + if (amdgpu_crtc->base.enabled && num_heads && mode) { + pixel_period = 1000000 / (u32)mode->clock; + line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535); + + /* watermark for high clocks */ + if (adev->pm.dpm_enabled) { + wm_high.yclk = + amdgpu_dpm_get_mclk(adev, false) * 10; + wm_high.sclk = + amdgpu_dpm_get_sclk(adev, false) * 10; + } else { + wm_high.yclk = adev->pm.current_mclk * 10; + wm_high.sclk = adev->pm.current_sclk * 10; + } + + wm_high.disp_clk = mode->clock; + wm_high.src_width = mode->crtc_hdisplay; + wm_high.active_time = mode->crtc_hdisplay * pixel_period; + wm_high.blank_time = line_time - wm_high.active_time; + wm_high.interlaced = false; + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + wm_high.interlaced = true; + wm_high.vsc = amdgpu_crtc->vsc; + wm_high.vtaps = 1; + if (amdgpu_crtc->rmx_type != RMX_OFF) + wm_high.vtaps = 2; + wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */ + wm_high.lb_size = lb_size; + wm_high.dram_channels = cik_get_number_of_dram_channels(adev); + wm_high.num_heads = num_heads; + + /* set for high clocks */ + latency_watermark_a = min(dce_v8_0_latency_watermark(&wm_high), (u32)65535); + + /* possibly force display priority to high */ + /* should really do this at mode validation time... 
*/ + if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) || + !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_high) || + !dce_v8_0_check_latency_hiding(&wm_high) || + (adev->mode_info.disp_priority == 2)) { + DRM_DEBUG_KMS("force priority to high\n"); + } + + /* watermark for low clocks */ + if (adev->pm.dpm_enabled) { + wm_low.yclk = + amdgpu_dpm_get_mclk(adev, true) * 10; + wm_low.sclk = + amdgpu_dpm_get_sclk(adev, true) * 10; + } else { + wm_low.yclk = adev->pm.current_mclk * 10; + wm_low.sclk = adev->pm.current_sclk * 10; + } + + wm_low.disp_clk = mode->clock; + wm_low.src_width = mode->crtc_hdisplay; + wm_low.active_time = mode->crtc_hdisplay * pixel_period; + wm_low.blank_time = line_time - wm_low.active_time; + wm_low.interlaced = false; + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + wm_low.interlaced = true; + wm_low.vsc = amdgpu_crtc->vsc; + wm_low.vtaps = 1; + if (amdgpu_crtc->rmx_type != RMX_OFF) + wm_low.vtaps = 2; + wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */ + wm_low.lb_size = lb_size; + wm_low.dram_channels = cik_get_number_of_dram_channels(adev); + wm_low.num_heads = num_heads; + + /* set for low clocks */ + latency_watermark_b = min(dce_v8_0_latency_watermark(&wm_low), (u32)65535); + + /* possibly force display priority to high */ + /* should really do this at mode validation time... */ + if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) || + !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_low) || + !dce_v8_0_check_latency_hiding(&wm_low) || + (adev->mode_info.disp_priority == 2)) { + DRM_DEBUG_KMS("force priority to high\n"); + } + } + + /* select wm A */ + wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset); + tmp = wm_mask; + tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT); + tmp |= (1 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT); + WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp); + WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, + ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) | + (line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT))); + /* select wm B */ + tmp = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset); + tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT); + tmp |= (2 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT); + WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp); + WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, + ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) | + (line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT))); + /* restore original selection */ + WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask); + + /* save values for DPM */ + amdgpu_crtc->line_time = line_time; + amdgpu_crtc->wm_high = latency_watermark_a; + amdgpu_crtc->wm_low = latency_watermark_b; +} + +/** + * dce_v8_0_bandwidth_update - program display watermarks + * + * @adev: amdgpu_device pointer + * + * Calculate and program the display watermarks and line + * buffer allocation (CIK). 
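+ *
+ * Worked example for the helper above (illustrative 1080p timing, not
+ * taken from the code): with a 148.5 MHz pixel clock
+ * dce_v8_0_program_watermarks() gets pixel_period = 1000000 / 148500 = 6 ns
+ * and, with crtc_htotal = 2200, line_time = min(2200 * 6, 65535) = 13200 ns,
+ * which is what lands in the URGENCY_HIGH_WATERMARK field and in
+ * amdgpu_crtc->line_time for dpm.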
+ */ +static void dce_v8_0_bandwidth_update(struct amdgpu_device *adev) +{ + struct drm_display_mode *mode = NULL; + u32 num_heads = 0, lb_size; + int i; + + amdgpu_update_display_priority(adev); + + for (i = 0; i < adev->mode_info.num_crtc; i++) { + if (adev->mode_info.crtcs[i]->base.enabled) + num_heads++; + } + for (i = 0; i < adev->mode_info.num_crtc; i++) { + mode = &adev->mode_info.crtcs[i]->base.mode; + lb_size = dce_v8_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode); + dce_v8_0_program_watermarks(adev, adev->mode_info.crtcs[i], + lb_size, num_heads); + } +} + +static void dce_v8_0_audio_get_connected_pins(struct amdgpu_device *adev) +{ + int i; + u32 offset, tmp; + + for (i = 0; i < adev->mode_info.audio.num_pins; i++) { + offset = adev->mode_info.audio.pin[i].offset; + tmp = RREG32_AUDIO_ENDPT(offset, + ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT); + if (((tmp & + AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >> + AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1) + adev->mode_info.audio.pin[i].connected = false; + else + adev->mode_info.audio.pin[i].connected = true; + } +} + +static struct amdgpu_audio_pin *dce_v8_0_audio_get_pin(struct amdgpu_device *adev) +{ + int i; + + dce_v8_0_audio_get_connected_pins(adev); + + for (i = 0; i < adev->mode_info.audio.num_pins; i++) { + if (adev->mode_info.audio.pin[i].connected) + return &adev->mode_info.audio.pin[i]; + } + DRM_ERROR("No connected audio pins found!\n"); + return NULL; +} + +static void dce_v8_0_afmt_audio_select_pin(struct drm_encoder *encoder) +{ + struct amdgpu_device *adev = encoder->dev->dev_private; + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; + u32 offset; + + if (!dig || !dig->afmt || !dig->afmt->pin) + return; + + offset = dig->afmt->offset; + + WREG32(mmAFMT_AUDIO_SRC_CONTROL + offset, + (dig->afmt->pin->id << AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT)); +} + +static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder, + struct drm_display_mode *mode) +{ + struct amdgpu_device *adev = encoder->dev->dev_private; + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; + struct drm_connector *connector; + struct amdgpu_connector *amdgpu_connector = NULL; + u32 tmp = 0, offset; + + if (!dig || !dig->afmt || !dig->afmt->pin) + return; + + offset = dig->afmt->pin->offset; + + list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + if (connector->encoder == encoder) { + amdgpu_connector = to_amdgpu_connector(connector); + break; + } + } + + if (!amdgpu_connector) { + DRM_ERROR("Couldn't find encoder's connector\n"); + return; + } + + if (mode->flags & DRM_MODE_FLAG_INTERLACE) { + if (connector->latency_present[1]) + tmp = + (connector->video_latency[1] << + AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) | + (connector->audio_latency[1] << + AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT); + else + tmp = + (0 << + AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) | + (0 << + AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT); + } else { + if (connector->latency_present[0]) + tmp = + (connector->video_latency[0] << + AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) | + (connector->audio_latency[0] << + 
AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT); + else + tmp = + (0 << + AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) | + (0 << + AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT); + + } + WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp); +} + +static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder) +{ + struct amdgpu_device *adev = encoder->dev->dev_private; + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; + struct drm_connector *connector; + struct amdgpu_connector *amdgpu_connector = NULL; + u32 offset, tmp; + u8 *sadb = NULL; + int sad_count; + + if (!dig || !dig->afmt || !dig->afmt->pin) + return; + + offset = dig->afmt->pin->offset; + + list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + if (connector->encoder == encoder) { + amdgpu_connector = to_amdgpu_connector(connector); + break; + } + } + + if (!amdgpu_connector) { + DRM_ERROR("Couldn't find encoder's connector\n"); + return; + } + + sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb); + if (sad_count < 0) { + DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); + sad_count = 0; + } + + /* program the speaker allocation */ + tmp = RREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER); + tmp &= ~(AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK | + AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK); + /* set HDMI mode */ + tmp |= AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK; + if (sad_count) + tmp |= (sadb[0] << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT); + else + tmp |= (5 << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT); /* stereo */ + WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp); + + kfree(sadb); +} + +static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder) +{ + struct amdgpu_device *adev = encoder->dev->dev_private; + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; + u32 offset; + struct drm_connector *connector; + struct amdgpu_connector *amdgpu_connector = NULL; + struct cea_sad *sads; + int i, sad_count; + + static const u16 eld_reg_to_type[][2] = { + { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM }, + { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 }, + { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 }, + { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 }, + { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 }, + { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC }, + { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS }, + { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC }, + { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 }, + { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD }, + { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP }, + { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO }, + }; + + if (!dig || !dig->afmt 
|| !dig->afmt->pin) + return; + + offset = dig->afmt->pin->offset; + + list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + if (connector->encoder == encoder) { + amdgpu_connector = to_amdgpu_connector(connector); + break; + } + } + + if (!amdgpu_connector) { + DRM_ERROR("Couldn't find encoder's connector\n"); + return; + } + + sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads); + if (sad_count <= 0) { + DRM_ERROR("Couldn't read SADs: %d\n", sad_count); + return; + } + BUG_ON(!sads); + + for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) { + u32 value = 0; + u8 stereo_freqs = 0; + int max_channels = -1; + int j; + + for (j = 0; j < sad_count; j++) { + struct cea_sad *sad = &sads[j]; + + if (sad->format == eld_reg_to_type[i][1]) { + if (sad->channels > max_channels) { + value = (sad->channels << + AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) | + (sad->byte2 << + AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) | + (sad->freq << + AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT); + max_channels = sad->channels; + } + + if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM) + stereo_freqs |= sad->freq; + else + break; + } + } + + value |= (stereo_freqs << + AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT); + + WREG32_AUDIO_ENDPT(offset, eld_reg_to_type[i][0], value); + } + + kfree(sads); +} + +static void dce_v8_0_audio_enable(struct amdgpu_device *adev, + struct amdgpu_audio_pin *pin, + bool enable) +{ + if (!pin) + return; + + WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, + enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0); +} + +static const u32 pin_offsets[7] = +{ + (0x1780 - 0x1780), + (0x1786 - 0x1780), + (0x178c - 0x1780), + (0x1792 - 0x1780), + (0x1798 - 0x1780), + (0x179d - 0x1780), + (0x17a4 - 0x1780), +}; + +static int dce_v8_0_audio_init(struct amdgpu_device *adev) +{ + int i; + + if (!amdgpu_audio) + return 0; + + adev->mode_info.audio.enabled = true; + + if (adev->asic_type == CHIP_KAVERI) /* KV: 4 streams, 7 endpoints */ + adev->mode_info.audio.num_pins = 7; + else if ((adev->asic_type == CHIP_KABINI) || + (adev->asic_type == CHIP_MULLINS)) /* KB/ML: 2 streams, 3 endpoints */ + adev->mode_info.audio.num_pins = 3; + else if ((adev->asic_type == CHIP_BONAIRE) || + (adev->asic_type == CHIP_HAWAII))/* BN/HW: 6 streams, 7 endpoints */ + adev->mode_info.audio.num_pins = 7; + else + adev->mode_info.audio.num_pins = 3; + + for (i = 0; i < adev->mode_info.audio.num_pins; i++) { + adev->mode_info.audio.pin[i].channels = -1; + adev->mode_info.audio.pin[i].rate = -1; + adev->mode_info.audio.pin[i].bits_per_sample = -1; + adev->mode_info.audio.pin[i].status_bits = 0; + adev->mode_info.audio.pin[i].category_code = 0; + adev->mode_info.audio.pin[i].connected = false; + adev->mode_info.audio.pin[i].offset = pin_offsets[i]; + adev->mode_info.audio.pin[i].id = i; + /* disable audio. 
it will be set up later */ + /* XXX remove once we switch to ip funcs */ + dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); + } + + return 0; +} + +static void dce_v8_0_audio_fini(struct amdgpu_device *adev) +{ + int i; + + if (!adev->mode_info.audio.enabled) + return; + + for (i = 0; i < adev->mode_info.audio.num_pins; i++) + dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); + + adev->mode_info.audio.enabled = false; +} + +/* + * update the N and CTS parameters for a given pixel clock rate + */ +static void dce_v8_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock) +{ + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock); + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; + uint32_t offset = dig->afmt->offset; + + WREG32(mmHDMI_ACR_32_0 + offset, (acr.cts_32khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT)); + WREG32(mmHDMI_ACR_32_1 + offset, acr.n_32khz); + + WREG32(mmHDMI_ACR_44_0 + offset, (acr.cts_44_1khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT)); + WREG32(mmHDMI_ACR_44_1 + offset, acr.n_44_1khz); + + WREG32(mmHDMI_ACR_48_0 + offset, (acr.cts_48khz << HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT)); + WREG32(mmHDMI_ACR_48_1 + offset, acr.n_48khz); +} + +/* + * build a HDMI Video Info Frame + */ +static void dce_v8_0_afmt_update_avi_infoframe(struct drm_encoder *encoder, + void *buffer, size_t size) +{ + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; + uint32_t offset = dig->afmt->offset; + uint8_t *frame = buffer + 3; + uint8_t *header = buffer; + + WREG32(mmAFMT_AVI_INFO0 + offset, + frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24)); + WREG32(mmAFMT_AVI_INFO1 + offset, + frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24)); + WREG32(mmAFMT_AVI_INFO2 + offset, + frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24)); + WREG32(mmAFMT_AVI_INFO3 + offset, + frame[0xC] | (frame[0xD] << 8) | (header[1] << 24)); +} + +static void dce_v8_0_audio_set_dto(struct drm_encoder *encoder, u32 clock) +{ + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); + u32 dto_phase = 24 * 1000; + u32 dto_modulo = clock; + + if (!dig || !dig->afmt) + return; + + /* XXX two dtos; generally use dto0 for hdmi */ + /* Express [24MHz / target pixel clock] as an exact rational + * number (coefficient of two integer numbers. 
DCCG_AUDIO_DTOx_PHASE + * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator + */ + WREG32(mmDCCG_AUDIO_DTO_SOURCE, (amdgpu_crtc->crtc_id << DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_SOURCE_SEL__SHIFT)); + WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase); + WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo); +} + +/* + * update the info frames with the data from the current display mode + */ +static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder, + struct drm_display_mode *mode) +{ + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; + struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); + u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE]; + struct hdmi_avi_infoframe frame; + uint32_t offset, val; + ssize_t err; + int bpc = 8; + + if (!dig || !dig->afmt) + return; + + /* Silent, r600_hdmi_enable will raise WARN for us */ + if (!dig->afmt->enabled) + return; + offset = dig->afmt->offset; + + /* hdmi deep color mode general control packets setup, if bpc > 8 */ + if (encoder->crtc) { + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); + bpc = amdgpu_crtc->bpc; + } + + /* disable audio prior to setting up hw */ + dig->afmt->pin = dce_v8_0_audio_get_pin(adev); + dce_v8_0_audio_enable(adev, dig->afmt->pin, false); + + dce_v8_0_audio_set_dto(encoder, mode->clock); + + WREG32(mmHDMI_VBI_PACKET_CONTROL + offset, + HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK); /* send null packets when required */ + + WREG32(mmAFMT_AUDIO_CRC_CONTROL + offset, 0x1000); + + val = RREG32(mmHDMI_CONTROL + offset); + val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK; + val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK; + + switch (bpc) { + case 0: + case 6: + case 8: + case 16: + default: + DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n", + connector->name, bpc); + break; + case 10: + val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK; + val |= 1 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT; + DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n", + connector->name); + break; + case 12: + val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK; + val |= 2 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT; + DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n", + connector->name); + break; + } + + WREG32(mmHDMI_CONTROL + offset, val); + + WREG32(mmHDMI_VBI_PACKET_CONTROL + offset, + HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK | /* send null packets when required */ + HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK | /* send general control packets */ + HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK); /* send general control packets every frame */ + + WREG32(mmHDMI_INFOFRAME_CONTROL0 + offset, + HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK | /* enable audio info frames (frames won't be set until audio is enabled) */ + HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK); /* required for audio info values to be updated */ + + WREG32(mmAFMT_INFOFRAME_CONTROL0 + offset, + AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK); /* required for audio info values to be updated */ + + WREG32(mmHDMI_INFOFRAME_CONTROL1 + offset, + (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT)); /* anything other than 0 */ + + WREG32(mmHDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */ + + WREG32(mmHDMI_AUDIO_PACKET_CONTROL + offset, + (1 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT) | /* set the 
default audio delay */
+ (3 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE__SHIFT)); /* should be sufficient for all audio modes and small enough for all hblanks */
+
+ WREG32(mmAFMT_AUDIO_PACKET_CONTROL + offset,
+ AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK); /* allow 60958 channel status fields to be updated */
+
+ /* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */
+
+ if (bpc > 8)
+ WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
+ HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK); /* allow hw to send ACR packets when required */
+ else
+ WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
+ HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK | /* select SW CTS value */
+ HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK); /* allow hw to send ACR packets when required */
+
+ dce_v8_0_afmt_update_ACR(encoder, mode->clock);
+
+ WREG32(mmAFMT_60958_0 + offset,
+ (1 << AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT));
+
+ WREG32(mmAFMT_60958_1 + offset,
+ (2 << AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT));
+
+ WREG32(mmAFMT_60958_2 + offset,
+ (3 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT) |
+ (4 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT) |
+ (5 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT) |
+ (6 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT) |
+ (7 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT) |
+ (8 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT));
+
+ dce_v8_0_audio_write_speaker_allocation(encoder);
+
+
+ WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + offset,
+ (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT));
+
+ dce_v8_0_afmt_audio_select_pin(encoder);
+ dce_v8_0_audio_write_sad_regs(encoder);
+ dce_v8_0_audio_write_latency_fields(encoder, mode);
+
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+ if (err < 0) {
+ DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
+ return;
+ }
+
+ err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
+ return;
+ }
+
+ dce_v8_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer));
+
+ WREG32_OR(mmHDMI_INFOFRAME_CONTROL0 + offset,
+ HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK | /* enable AVI info frames */
+ HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_CONT_MASK); /* required for audio info values to be updated */
+
+ WREG32_P(mmHDMI_INFOFRAME_CONTROL1 + offset,
+ (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE__SHIFT), /* anything other than 0 */
+ ~HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE_MASK);
+
+ WREG32_OR(mmAFMT_AUDIO_PACKET_CONTROL + offset,
+ AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK); /* send audio packets */
+
+ /* it's unknown what these bits do exactly, but they are quite useful for debugging */
+ WREG32(mmAFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
+ WREG32(mmAFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
+ WREG32(mmAFMT_RAMP_CONTROL2 + offset, 0x00000001);
+ WREG32(mmAFMT_RAMP_CONTROL3 + offset, 0x00000001);
+
+ /* enable audio after setting up hw */
+ dce_v8_0_audio_enable(adev, dig->afmt->pin, true);
+}
+
+static void dce_v8_0_afmt_enable(struct drm_encoder *encoder, bool enable)
+{
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+
+ if (!dig || !dig->afmt)
+ return;
+
+ /* Silent, r600_hdmi_enable will raise WARN for us */
+ if (enable &&
dig->afmt->enabled) + return; + if (!enable && !dig->afmt->enabled) + return; + + if (!enable && dig->afmt->pin) { + dce_v8_0_audio_enable(adev, dig->afmt->pin, false); + dig->afmt->pin = NULL; + } + + dig->afmt->enabled = enable; + + DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n", + enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id); +} + +static void dce_v8_0_afmt_init(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < adev->mode_info.num_dig; i++) + adev->mode_info.afmt[i] = NULL; + + /* DCE8 has audio blocks tied to DIG encoders */ + for (i = 0; i < adev->mode_info.num_dig; i++) { + adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL); + if (adev->mode_info.afmt[i]) { + adev->mode_info.afmt[i]->offset = dig_offsets[i]; + adev->mode_info.afmt[i]->id = i; + } + } +} + +static void dce_v8_0_afmt_fini(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < adev->mode_info.num_dig; i++) { + kfree(adev->mode_info.afmt[i]); + adev->mode_info.afmt[i] = NULL; + } +} + +static const u32 vga_control_regs[6] = +{ + mmD1VGA_CONTROL, + mmD2VGA_CONTROL, + mmD3VGA_CONTROL, + mmD4VGA_CONTROL, + mmD5VGA_CONTROL, + mmD6VGA_CONTROL, +}; + +static void dce_v8_0_vga_enable(struct drm_crtc *crtc, bool enable) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct amdgpu_device *adev = dev->dev_private; + u32 vga_control; + + vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1; + if (enable) + WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1); + else + WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control); +} + +static void dce_v8_0_grph_enable(struct drm_crtc *crtc, bool enable) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct amdgpu_device *adev = dev->dev_private; + + if (enable) + WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1); + else + WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0); +} + +static void dce_v8_0_tiling_fields(uint64_t tiling_flags, unsigned *bankw, + unsigned *bankh, unsigned *mtaspect, + unsigned *tile_split) +{ + *bankw = (tiling_flags >> AMDGPU_TILING_EG_BANKW_SHIFT) & AMDGPU_TILING_EG_BANKW_MASK; + *bankh = (tiling_flags >> AMDGPU_TILING_EG_BANKH_SHIFT) & AMDGPU_TILING_EG_BANKH_MASK; + *mtaspect = (tiling_flags >> AMDGPU_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & AMDGPU_TILING_EG_MACRO_TILE_ASPECT_MASK; + *tile_split = (tiling_flags >> AMDGPU_TILING_EG_TILE_SPLIT_SHIFT) & AMDGPU_TILING_EG_TILE_SPLIT_MASK; + switch (*bankw) { + default: + case 1: + *bankw = ADDR_SURF_BANK_WIDTH_1; + break; + case 2: + *bankw = ADDR_SURF_BANK_WIDTH_2; + break; + case 4: + *bankw = ADDR_SURF_BANK_WIDTH_4; + break; + case 8: + *bankw = ADDR_SURF_BANK_WIDTH_8; + break; + } + switch (*bankh) { + default: + case 1: + *bankh = ADDR_SURF_BANK_HEIGHT_1; + break; + case 2: + *bankh = ADDR_SURF_BANK_HEIGHT_2; + break; + case 4: + *bankh = ADDR_SURF_BANK_HEIGHT_4; + break; + case 8: + *bankh = ADDR_SURF_BANK_HEIGHT_8; + break; + } + switch (*mtaspect) { + default: + case 1: + *mtaspect = ADDR_SURF_MACRO_TILE_ASPECT_1; + break; + case 2: + *mtaspect = ADDR_SURF_MACRO_TILE_ASPECT_2; + break; + case 4: + *mtaspect = ADDR_SURF_MACRO_TILE_ASPECT_4; + break; + case 8: + *mtaspect = ADDR_SURF_MACRO_TILE_ASPECT_8; + break; + } +} + +static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int x, int y, int atomic) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 
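+ /*
+ * Both the atomic and the non-atomic paths end up here: when called
+ * atomically the buffer is assumed to be pinned and fenced already and
+ * only the base pointers are updated, otherwise the fb is pinned into
+ * VRAM first.  Tiling parameters come from dce_v8_0_tiling_fields()
+ * above, which maps the raw 1/2/4/8 values packed in tiling_flags onto
+ * the matching ADDR_SURF_* enums (anything else falls back to *_1).
+ */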
+ struct drm_device *dev = crtc->dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_framebuffer *amdgpu_fb; + struct drm_framebuffer *target_fb; + struct drm_gem_object *obj; + struct amdgpu_bo *rbo; + uint64_t fb_location, tiling_flags; + uint32_t fb_format, fb_pitch_pixels; + unsigned bankw, bankh, mtaspect, tile_split; + u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); + u32 pipe_config = (adev->gfx.config.tile_mode_array[10] >> 6) & 0x1f; + u32 tmp, viewport_w, viewport_h; + int r; + bool bypass_lut = false; + + /* no fb bound */ + if (!atomic && !crtc->primary->fb) { + DRM_DEBUG_KMS("No FB bound\n"); + return 0; + } + + if (atomic) { + amdgpu_fb = to_amdgpu_framebuffer(fb); + target_fb = fb; + } + else { + amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); + target_fb = crtc->primary->fb; + } + + /* If atomic, assume fb object is pinned & idle & fenced and + * just update base pointers + */ + obj = amdgpu_fb->obj; + rbo = gem_to_amdgpu_bo(obj); + r = amdgpu_bo_reserve(rbo, false); + if (unlikely(r != 0)) + return r; + + if (atomic) + fb_location = amdgpu_bo_gpu_offset(rbo); + else { + r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location); + if (unlikely(r != 0)) { + amdgpu_bo_unreserve(rbo); + return -EINVAL; + } + } + + amdgpu_bo_get_tiling_flags(rbo, &tiling_flags); + amdgpu_bo_unreserve(rbo); + + switch (target_fb->pixel_format) { + case DRM_FORMAT_C8: + fb_format = ((GRPH_DEPTH_8BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | + (GRPH_FORMAT_INDEXED << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); + break; + case DRM_FORMAT_XRGB4444: + case DRM_FORMAT_ARGB4444: + fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | + (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); +#ifdef __BIG_ENDIAN + fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); +#endif + break; + case DRM_FORMAT_XRGB1555: + case DRM_FORMAT_ARGB1555: + fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | + (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); +#ifdef __BIG_ENDIAN + fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); +#endif + break; + case DRM_FORMAT_BGRX5551: + case DRM_FORMAT_BGRA5551: + fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | + (GRPH_FORMAT_BGRA5551 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); +#ifdef __BIG_ENDIAN + fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); +#endif + break; + case DRM_FORMAT_RGB565: + fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | + (GRPH_FORMAT_ARGB565 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); +#ifdef __BIG_ENDIAN + fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); +#endif + break; + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_ARGB8888: + fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | + (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); +#ifdef __BIG_ENDIAN + fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); +#endif + break; + case DRM_FORMAT_XRGB2101010: + case DRM_FORMAT_ARGB2101010: + fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | + (GRPH_FORMAT_ARGB2101010 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); +#ifdef __BIG_ENDIAN + fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); +#endif + /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */ + bypass_lut = true; + break; + case DRM_FORMAT_BGRX1010102: + case DRM_FORMAT_BGRA1010102: + fb_format = 
((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | + (GRPH_FORMAT_BGRA1010102 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); +#ifdef __BIG_ENDIAN + fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); +#endif + /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */ + bypass_lut = true; + break; + default: + DRM_ERROR("Unsupported screen format %s\n", + drm_get_format_name(target_fb->pixel_format)); + return -EINVAL; + } + + if (tiling_flags & AMDGPU_TILING_MACRO) { + unsigned tileb, index, num_banks, tile_split_bytes; + + dce_v8_0_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split); + /* Set NUM_BANKS. */ + /* Calculate the macrotile mode index. */ + tile_split_bytes = 64 << tile_split; + tileb = 8 * 8 * target_fb->bits_per_pixel / 8; + tileb = min(tile_split_bytes, tileb); + + for (index = 0; tileb > 64; index++) { + tileb >>= 1; + } + + if (index >= 16) { + DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n", + target_fb->bits_per_pixel, tile_split); + return -EINVAL; + } + + num_banks = (adev->gfx.config.macrotile_mode_array[index] >> 6) & 0x3; + fb_format |= (num_banks << GRPH_CONTROL__GRPH_NUM_BANKS__SHIFT); + fb_format |= (GRPH_ARRAY_2D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT); + fb_format |= (tile_split << GRPH_CONTROL__GRPH_TILE_SPLIT__SHIFT); + fb_format |= (bankw << GRPH_CONTROL__GRPH_BANK_WIDTH__SHIFT); + fb_format |= (bankh << GRPH_CONTROL__GRPH_BANK_HEIGHT__SHIFT); + fb_format |= (mtaspect << GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT__SHIFT); + fb_format |= (DISPLAY_MICRO_TILING << GRPH_CONTROL__GRPH_MICRO_TILE_MODE__SHIFT); + } else if (tiling_flags & AMDGPU_TILING_MICRO) { + fb_format |= (GRPH_ARRAY_1D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT); + } + + /* Read the pipe config from the 2D TILED SCANOUT mode. + * It should be the same for the other modes too, but not all + * modes set the pipe config field. */ + fb_format |= (pipe_config << GRPH_CONTROL__GRPH_PIPE_CONFIG__SHIFT); + + dce_v8_0_vga_enable(crtc, false); + + WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, + upper_32_bits(fb_location)); + WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, + upper_32_bits(fb_location)); + WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, + (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK); + WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, + (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK); + WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format); + WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap); + + /* + * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT + * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to + * retain the full precision throughout the pipeline. + */ + WREG32_P(mmGRPH_LUT_10BIT_BYPASS_CONTROL + amdgpu_crtc->crtc_offset, + (bypass_lut ? 
LUT_10BIT_BYPASS_EN : 0), + ~LUT_10BIT_BYPASS_EN); + + if (bypass_lut) + DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n"); + + WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0); + WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0); + WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0); + WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0); + WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width); + WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height); + + fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8); + WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels); + + dce_v8_0_grph_enable(crtc, true); + + WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset, + target_fb->height); + + x &= ~3; + y &= ~1; + WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset, + (x << 16) | y); + viewport_w = crtc->mode.hdisplay; + viewport_h = (crtc->mode.vdisplay + 1) & ~1; + WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset, + (viewport_w << 16) | viewport_h); + + /* pageflip setup */ + /* make sure flip is at vb rather than hb */ + tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset); + tmp &= ~GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK; + WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp); + + /* set pageflip to happen only at start of vblank interval (front porch) */ + WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3); + + if (!atomic && fb && fb != crtc->primary->fb) { + amdgpu_fb = to_amdgpu_framebuffer(fb); + rbo = gem_to_amdgpu_bo(amdgpu_fb->obj); + r = amdgpu_bo_reserve(rbo, false); + if (unlikely(r != 0)) + return r; + amdgpu_bo_unpin(rbo); + amdgpu_bo_unreserve(rbo); + } + + /* Bytes per pixel may have changed */ + dce_v8_0_bandwidth_update(adev); + + return 0; +} + +static void dce_v8_0_set_interleave(struct drm_crtc *crtc, + struct drm_display_mode *mode) +{ + struct drm_device *dev = crtc->dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, + LB_DATA_FORMAT__INTERLEAVE_EN__SHIFT); + else + WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, 0); +} + +static void dce_v8_0_crtc_load_lut(struct drm_crtc *crtc) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct amdgpu_device *adev = dev->dev_private; + int i; + + DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id); + + WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, + ((INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) | + (INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT))); + WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset, + PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK); + WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset, + PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK); + WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset, + ((INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) | + (INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT))); + + WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0); + + WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0); + WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0); + WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0); + + WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff); + 
WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff); + WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff); + + WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0); + WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007); + + WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0); + for (i = 0; i < 256; i++) { + WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset, + (amdgpu_crtc->lut_r[i] << 20) | + (amdgpu_crtc->lut_g[i] << 10) | + (amdgpu_crtc->lut_b[i] << 0)); + } + + WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset, + ((DEGAMMA_BYPASS << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) | + (DEGAMMA_BYPASS << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) | + (DEGAMMA_BYPASS << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT))); + WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset, + ((GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) | + (GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT))); + WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset, + ((REGAMMA_BYPASS << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) | + (REGAMMA_BYPASS << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT))); + WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, + ((OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) | + (OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT))); + /* XXX match this to the depth of the crtc fmt block, move to modeset? */ + WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0); + /* XXX this only needs to be programmed once per crtc at startup, + * not sure where the best place for it is + */ + WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset, + ALPHA_CONTROL__CURSOR_ALPHA_BLND_ENA_MASK); +} + +static int dce_v8_0_pick_dig_encoder(struct drm_encoder *encoder) +{ + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; + + switch (amdgpu_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: + if (dig->linkb) + return 1; + else + return 0; + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: + if (dig->linkb) + return 3; + else + return 2; + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + if (dig->linkb) + return 5; + else + return 4; + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: + return 6; + break; + default: + DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id); + return 0; + } +} + +/** + * dce_v8_0_pick_pll - Allocate a PPLL for use by the crtc. + * + * @crtc: drm crtc + * + * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors + * a single PPLL can be used for all DP crtcs/encoders. For non-DP + * monitors a dedicated PPLL must be used. If a particular board has + * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming + * as there is no need to program the PLL itself. If we are not able to + * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to + * avoid messing up an existing monitor. 
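+ *
+ * The allocation below is a first-free scan of the PPLL use mask: as an
+ * illustration, if amdgpu_pll_get_use_mask() reports only PPLL2 as busy
+ * on a KB/ML part, PPLL1 is handed out; only when every candidate bit is
+ * already set does the function give up and return ATOM_PPLL_INVALID.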
+ * + * Asic specific PLL information + * + * DCE 8.x + * KB/KV + * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) + * CI + * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC + * + */ +static u32 dce_v8_0_pick_pll(struct drm_crtc *crtc) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct amdgpu_device *adev = dev->dev_private; + u32 pll_in_use; + int pll; + + if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) { + if (adev->clock.dp_extclk) + /* skip PPLL programming if using ext clock */ + return ATOM_PPLL_INVALID; + else { + /* use the same PPLL for all DP monitors */ + pll = amdgpu_pll_get_shared_dp_ppll(crtc); + if (pll != ATOM_PPLL_INVALID) + return pll; + } + } else { + /* use the same PPLL for all monitors with the same clock */ + pll = amdgpu_pll_get_shared_nondp_ppll(crtc); + if (pll != ATOM_PPLL_INVALID) + return pll; + } + /* otherwise, pick one of the plls */ + if ((adev->asic_type == CHIP_KABINI) || + (adev->asic_type == CHIP_MULLINS)) { + /* KB/ML has PPLL1 and PPLL2 */ + pll_in_use = amdgpu_pll_get_use_mask(crtc); + if (!(pll_in_use & (1 << ATOM_PPLL2))) + return ATOM_PPLL2; + if (!(pll_in_use & (1 << ATOM_PPLL1))) + return ATOM_PPLL1; + DRM_ERROR("unable to allocate a PPLL\n"); + return ATOM_PPLL_INVALID; + } else { + /* CI/KV has PPLL0, PPLL1, and PPLL2 */ + pll_in_use = amdgpu_pll_get_use_mask(crtc); + if (!(pll_in_use & (1 << ATOM_PPLL2))) + return ATOM_PPLL2; + if (!(pll_in_use & (1 << ATOM_PPLL1))) + return ATOM_PPLL1; + if (!(pll_in_use & (1 << ATOM_PPLL0))) + return ATOM_PPLL0; + DRM_ERROR("unable to allocate a PPLL\n"); + return ATOM_PPLL_INVALID; + } + return ATOM_PPLL_INVALID; +} + +static void dce_v8_0_lock_cursor(struct drm_crtc *crtc, bool lock) +{ + struct amdgpu_device *adev = crtc->dev->dev_private; + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + uint32_t cur_lock; + + cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset); + if (lock) + cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK; + else + cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK; + WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock); +} + +static void dce_v8_0_hide_cursor(struct drm_crtc *crtc) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct amdgpu_device *adev = crtc->dev->dev_private; + + WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, + (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) | + (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT)); +} + +static void dce_v8_0_show_cursor(struct drm_crtc *crtc) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct amdgpu_device *adev = crtc->dev->dev_private; + + WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, + CUR_CONTROL__CURSOR_EN_MASK | + (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) | + (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT)); +} + +static void dce_v8_0_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj, + uint64_t gpu_addr) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct amdgpu_device *adev = crtc->dev->dev_private; + + WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, + upper_32_bits(gpu_addr)); + WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, + gpu_addr & 0xffffffff); +} + +static int dce_v8_0_crtc_cursor_move(struct drm_crtc *crtc, + int x, int y) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct 
amdgpu_device *adev = crtc->dev->dev_private; + int xorigin = 0, yorigin = 0; + + /* avivo cursor are offset into the total surface */ + x += crtc->x; + y += crtc->y; + DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); + + if (x < 0) { + xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1); + x = 0; + } + if (y < 0) { + yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1); + y = 0; + } + + dce_v8_0_lock_cursor(crtc, true); + WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); + WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); + WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, + ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1)); + dce_v8_0_lock_cursor(crtc, false); + + return 0; +} + +static int dce_v8_0_crtc_cursor_set(struct drm_crtc *crtc, + struct drm_file *file_priv, + uint32_t handle, + uint32_t width, + uint32_t height) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct drm_gem_object *obj; + struct amdgpu_bo *robj; + uint64_t gpu_addr; + int ret; + + if (!handle) { + /* turn off cursor */ + dce_v8_0_hide_cursor(crtc); + obj = NULL; + goto unpin; + } + + if ((width > amdgpu_crtc->max_cursor_width) || + (height > amdgpu_crtc->max_cursor_height)) { + DRM_ERROR("bad cursor width or height %d x %d\n", width, height); + return -EINVAL; + } + + obj = drm_gem_object_lookup(crtc->dev, file_priv, handle); + if (!obj) { + DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id); + return -ENOENT; + } + + robj = gem_to_amdgpu_bo(obj); + ret = amdgpu_bo_reserve(robj, false); + if (unlikely(ret != 0)) + goto fail; + ret = amdgpu_bo_pin_restricted(robj, AMDGPU_GEM_DOMAIN_VRAM, + 0, &gpu_addr); + amdgpu_bo_unreserve(robj); + if (ret) + goto fail; + + amdgpu_crtc->cursor_width = width; + amdgpu_crtc->cursor_height = height; + + dce_v8_0_lock_cursor(crtc, true); + dce_v8_0_set_cursor(crtc, obj, gpu_addr); + dce_v8_0_show_cursor(crtc); + dce_v8_0_lock_cursor(crtc, false); + +unpin: + if (amdgpu_crtc->cursor_bo) { + robj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); + ret = amdgpu_bo_reserve(robj, false); + if (likely(ret == 0)) { + amdgpu_bo_unpin(robj); + amdgpu_bo_unreserve(robj); + } + drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo); + } + + amdgpu_crtc->cursor_bo = obj; + return 0; +fail: + drm_gem_object_unreference_unlocked(obj); + + return ret; +} + +static void dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, uint32_t start, uint32_t size) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + int end = (start + size > 256) ? 
256 : start + size, i; + + /* userspace palettes are always correct as is */ + for (i = start; i < end; i++) { + amdgpu_crtc->lut_r[i] = red[i] >> 6; + amdgpu_crtc->lut_g[i] = green[i] >> 6; + amdgpu_crtc->lut_b[i] = blue[i] >> 6; + } + dce_v8_0_crtc_load_lut(crtc); +} + +static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + + drm_crtc_cleanup(crtc); + destroy_workqueue(amdgpu_crtc->pflip_queue); + kfree(amdgpu_crtc); +} + +static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = { + .cursor_set = dce_v8_0_crtc_cursor_set, + .cursor_move = dce_v8_0_crtc_cursor_move, + .gamma_set = dce_v8_0_crtc_gamma_set, + .set_config = amdgpu_crtc_set_config, + .destroy = dce_v8_0_crtc_destroy, + .page_flip = amdgpu_crtc_page_flip, +}; + +static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode) +{ + struct drm_device *dev = crtc->dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + + switch (mode) { + case DRM_MODE_DPMS_ON: + amdgpu_crtc->enabled = true; + amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE); + dce_v8_0_vga_enable(crtc, true); + amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); + dce_v8_0_vga_enable(crtc, false); + drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); + dce_v8_0_crtc_load_lut(crtc); + break; + case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_SUSPEND: + case DRM_MODE_DPMS_OFF: + drm_vblank_pre_modeset(dev, amdgpu_crtc->crtc_id); + if (amdgpu_crtc->enabled) { + dce_v8_0_vga_enable(crtc, true); + amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE); + dce_v8_0_vga_enable(crtc, false); + } + amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE); + amdgpu_crtc->enabled = false; + break; + } + /* adjust pm to dpms */ + amdgpu_pm_compute_clocks(adev); +} + +static void dce_v8_0_crtc_prepare(struct drm_crtc *crtc) +{ + /* disable crtc pair power gating before programming */ + amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE); + amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE); + dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); +} + +static void dce_v8_0_crtc_commit(struct drm_crtc *crtc) +{ + dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON); + amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE); +} + +static void dce_v8_0_crtc_disable(struct drm_crtc *crtc) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_atom_ss ss; + int i; + + dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); + if (crtc->primary->fb) { + int r; + struct amdgpu_framebuffer *amdgpu_fb; + struct amdgpu_bo *rbo; + + amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); + rbo = gem_to_amdgpu_bo(amdgpu_fb->obj); + r = amdgpu_bo_reserve(rbo, false); + if (unlikely(r)) + DRM_ERROR("failed to reserve rbo before unpin\n"); + else { + amdgpu_bo_unpin(rbo); + amdgpu_bo_unreserve(rbo); + } + } + /* disable the GRPH */ + dce_v8_0_grph_enable(crtc, false); + + amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE); + + for (i = 0; i < adev->mode_info.num_crtc; i++) { + if (adev->mode_info.crtcs[i] && + adev->mode_info.crtcs[i]->enabled && + i != amdgpu_crtc->crtc_id && + amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) { + /* one other crtc is using this pll don't turn + * off the pll + */ + goto done; + } + } + + switch (amdgpu_crtc->pll_id) { + case ATOM_PPLL1: + case ATOM_PPLL2: + /* disable the ppll */ + amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id, + 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 
0, false, &ss); + break; + case ATOM_PPLL0: + /* disable the ppll */ + if ((adev->asic_type == CHIP_KAVERI) || + (adev->asic_type == CHIP_BONAIRE) || + (adev->asic_type == CHIP_HAWAII)) + amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id, + 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss); + break; + default: + break; + } +done: + amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; + amdgpu_crtc->adjusted_clock = 0; + amdgpu_crtc->encoder = NULL; + amdgpu_crtc->connector = NULL; +} + +static int dce_v8_0_crtc_mode_set(struct drm_crtc *crtc, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode, + int x, int y, struct drm_framebuffer *old_fb) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + + if (!amdgpu_crtc->adjusted_clock) + return -EINVAL; + + amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode); + amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode); + dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0); + amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode); + amdgpu_atombios_crtc_scaler_setup(crtc); + /* update the hw version fpr dpm */ + amdgpu_crtc->hw_mode = *adjusted_mode; + + return 0; +} + +static bool dce_v8_0_crtc_mode_fixup(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct drm_encoder *encoder; + + /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */ + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + if (encoder->crtc == crtc) { + amdgpu_crtc->encoder = encoder; + amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder); + break; + } + } + if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) { + amdgpu_crtc->encoder = NULL; + amdgpu_crtc->connector = NULL; + return false; + } + if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) + return false; + if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode)) + return false; + /* pick pll */ + amdgpu_crtc->pll_id = dce_v8_0_pick_pll(crtc); + /* if we can't get a PPLL for a non-DP encoder, fail */ + if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) && + !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) + return false; + + return true; +} + +static int dce_v8_0_crtc_set_base(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *old_fb) +{ + return dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0); +} + +static int dce_v8_0_crtc_set_base_atomic(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int x, int y, enum mode_set_atomic state) +{ + return dce_v8_0_crtc_do_set_base(crtc, fb, x, y, 1); +} + +static const struct drm_crtc_helper_funcs dce_v8_0_crtc_helper_funcs = { + .dpms = dce_v8_0_crtc_dpms, + .mode_fixup = dce_v8_0_crtc_mode_fixup, + .mode_set = dce_v8_0_crtc_mode_set, + .mode_set_base = dce_v8_0_crtc_set_base, + .mode_set_base_atomic = dce_v8_0_crtc_set_base_atomic, + .prepare = dce_v8_0_crtc_prepare, + .commit = dce_v8_0_crtc_commit, + .load_lut = dce_v8_0_crtc_load_lut, + .disable = dce_v8_0_crtc_disable, +}; + +static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index) +{ + struct amdgpu_crtc *amdgpu_crtc; + int i; + + amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) + + (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); + if (amdgpu_crtc == NULL) + return -ENOMEM; + + drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v8_0_crtc_funcs); + + 
drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256); + amdgpu_crtc->crtc_id = index; + amdgpu_crtc->pflip_queue = create_singlethread_workqueue("amdgpu-pageflip-queue"); + adev->mode_info.crtcs[index] = amdgpu_crtc; + + amdgpu_crtc->max_cursor_width = CIK_CURSOR_WIDTH; + amdgpu_crtc->max_cursor_height = CIK_CURSOR_HEIGHT; + adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width; + adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height; + + for (i = 0; i < 256; i++) { + amdgpu_crtc->lut_r[i] = i << 2; + amdgpu_crtc->lut_g[i] = i << 2; + amdgpu_crtc->lut_b[i] = i << 2; + } + + amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id]; + + amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; + amdgpu_crtc->adjusted_clock = 0; + amdgpu_crtc->encoder = NULL; + amdgpu_crtc->connector = NULL; + drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v8_0_crtc_helper_funcs); + + return 0; +} + +static int dce_v8_0_early_init(struct amdgpu_device *adev) +{ + adev->audio_endpt_rreg = &dce_v8_0_audio_endpt_rreg; + adev->audio_endpt_wreg = &dce_v8_0_audio_endpt_wreg; + + dce_v8_0_set_display_funcs(adev); + dce_v8_0_set_irq_funcs(adev); + + switch (adev->asic_type) { + case CHIP_BONAIRE: + case CHIP_HAWAII: + adev->mode_info.num_crtc = 6; + adev->mode_info.num_hpd = 6; + adev->mode_info.num_dig = 6; + break; + case CHIP_KAVERI: + adev->mode_info.num_crtc = 4; + adev->mode_info.num_hpd = 6; + adev->mode_info.num_dig = 7; + break; + case CHIP_KABINI: + case CHIP_MULLINS: + adev->mode_info.num_crtc = 2; + adev->mode_info.num_hpd = 6; + adev->mode_info.num_dig = 6; /* ? */ + break; + default: + /* FIXME: not supported yet */ + return -EINVAL; + } + + return 0; +} + +static int dce_v8_0_sw_init(struct amdgpu_device *adev) +{ + int r, i; + + for (i = 0; i < adev->mode_info.num_crtc; i++) { + r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq); + if (r) + return r; + } + + for (i = 8; i < 20; i += 2) { + r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq); + if (r) + return r; + } + + /* HPD hotplug */ + r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq); + if (r) + return r; + + adev->mode_info.mode_config_initialized = true; + + adev->ddev->mode_config.funcs = &amdgpu_mode_funcs; + + adev->ddev->mode_config.max_width = 16384; + adev->ddev->mode_config.max_height = 16384; + + adev->ddev->mode_config.preferred_depth = 24; + adev->ddev->mode_config.prefer_shadow = 1; + + adev->ddev->mode_config.fb_base = adev->mc.aper_base; + + r = amdgpu_modeset_create_props(adev); + if (r) + return r; + + adev->ddev->mode_config.max_width = 16384; + adev->ddev->mode_config.max_height = 16384; + + /* allocate crtcs */ + for (i = 0; i < adev->mode_info.num_crtc; i++) { + r = dce_v8_0_crtc_init(adev, i); + if (r) + return r; + } + + if (amdgpu_atombios_get_connector_info_from_object_table(adev)) + amdgpu_print_display_setup(adev->ddev); + else + return -EINVAL; + + /* setup afmt */ + dce_v8_0_afmt_init(adev); + + r = dce_v8_0_audio_init(adev); + if (r) + return r; + + drm_kms_helper_poll_init(adev->ddev); + + return r; +} + +static int dce_v8_0_sw_fini(struct amdgpu_device *adev) +{ + kfree(adev->mode_info.bios_hardcoded_edid); + + drm_kms_helper_poll_fini(adev->ddev); + + dce_v8_0_audio_fini(adev); + + dce_v8_0_afmt_fini(adev); + + drm_mode_config_cleanup(adev->ddev); + adev->mode_info.mode_config_initialized = false; + + return 0; +} + +static int dce_v8_0_hw_init(struct amdgpu_device *adev) +{ + int i; + + /* init dig PHYs, disp eng pll */ + amdgpu_atombios_encoder_init_dig(adev); + 
amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk); + + /* initialize hpd */ + dce_v8_0_hpd_init(adev); + + for (i = 0; i < adev->mode_info.audio.num_pins; i++) { + dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); + } + + return 0; +} + +static int dce_v8_0_hw_fini(struct amdgpu_device *adev) +{ + int i; + + dce_v8_0_hpd_fini(adev); + + for (i = 0; i < adev->mode_info.audio.num_pins; i++) { + dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); + } + + return 0; +} + +static int dce_v8_0_suspend(struct amdgpu_device *adev) +{ + struct drm_connector *connector; + + drm_kms_helper_poll_disable(adev->ddev); + + /* turn off display hw */ + list_for_each_entry(connector, &adev->ddev->mode_config.connector_list, head) { + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); + } + + amdgpu_atombios_scratch_regs_save(adev); + + dce_v8_0_hpd_fini(adev); + + return 0; +} + +static int dce_v8_0_resume(struct amdgpu_device *adev) +{ + struct drm_connector *connector; + + amdgpu_atombios_scratch_regs_restore(adev); + + /* init dig PHYs, disp eng pll */ + amdgpu_atombios_encoder_init_dig(adev); + amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk); + /* turn on the BL */ + if (adev->mode_info.bl_encoder) { + u8 bl_level = amdgpu_display_backlight_get_level(adev, + adev->mode_info.bl_encoder); + amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder, + bl_level); + } + + /* initialize hpd */ + dce_v8_0_hpd_init(adev); + + /* blat the mode back in */ + drm_helper_resume_force_mode(adev->ddev); + /* turn on display hw */ + list_for_each_entry(connector, &adev->ddev->mode_config.connector_list, head) { + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); + } + + drm_kms_helper_poll_enable(adev->ddev); + + return 0; +} + +static bool dce_v8_0_is_idle(struct amdgpu_device *adev) +{ + /* XXX todo */ + return true; +} + +static int dce_v8_0_wait_for_idle(struct amdgpu_device *adev) +{ + /* XXX todo */ + return 0; +} + +static void dce_v8_0_print_status(struct amdgpu_device *adev) +{ + dev_info(adev->dev, "DCE 8.x registers\n"); + /* XXX todo */ +} + +static int dce_v8_0_soft_reset(struct amdgpu_device *adev) +{ + u32 srbm_soft_reset = 0, tmp; + + if (dce_v8_0_is_display_hung(adev)) + srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK; + + if (srbm_soft_reset) { + dce_v8_0_print_status(adev); + + tmp = RREG32(mmSRBM_SOFT_RESET); + tmp |= srbm_soft_reset; + dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + udelay(50); + + tmp &= ~srbm_soft_reset; + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + /* Wait a little for things to settle down */ + udelay(50); + dce_v8_0_print_status(adev); + } + return 0; +} + +static void dce_v8_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev, + int crtc, + enum amdgpu_interrupt_state state) +{ + u32 reg_block, lb_interrupt_mask; + + if (crtc >= adev->mode_info.num_crtc) { + DRM_DEBUG("invalid crtc %d\n", crtc); + return; + } + + switch (crtc) { + case 0: + reg_block = CRTC0_REGISTER_OFFSET; + break; + case 1: + reg_block = CRTC1_REGISTER_OFFSET; + break; + case 2: + reg_block = CRTC2_REGISTER_OFFSET; + break; + case 3: + reg_block = CRTC3_REGISTER_OFFSET; + break; + case 4: + reg_block = CRTC4_REGISTER_OFFSET; + break; + case 5: + reg_block = CRTC5_REGISTER_OFFSET; + break; + default: + DRM_DEBUG("invalid crtc %d\n", crtc); + return; + } + + switch (state) { + case 
AMDGPU_IRQ_STATE_DISABLE: + lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block); + lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK; + WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask); + break; + case AMDGPU_IRQ_STATE_ENABLE: + lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block); + lb_interrupt_mask |= LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK; + WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask); + break; + default: + break; + } +} + +static void dce_v8_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev, + int crtc, + enum amdgpu_interrupt_state state) +{ + u32 reg_block, lb_interrupt_mask; + + if (crtc >= adev->mode_info.num_crtc) { + DRM_DEBUG("invalid crtc %d\n", crtc); + return; + } + + switch (crtc) { + case 0: + reg_block = CRTC0_REGISTER_OFFSET; + break; + case 1: + reg_block = CRTC1_REGISTER_OFFSET; + break; + case 2: + reg_block = CRTC2_REGISTER_OFFSET; + break; + case 3: + reg_block = CRTC3_REGISTER_OFFSET; + break; + case 4: + reg_block = CRTC4_REGISTER_OFFSET; + break; + case 5: + reg_block = CRTC5_REGISTER_OFFSET; + break; + default: + DRM_DEBUG("invalid crtc %d\n", crtc); + return; + } + + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block); + lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK; + WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask); + break; + case AMDGPU_IRQ_STATE_ENABLE: + lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block); + lb_interrupt_mask |= LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK; + WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask); + break; + default: + break; + } +} + +static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned type, + enum amdgpu_interrupt_state state) +{ + u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl; + + switch (type) { + case AMDGPU_HPD_1: + dc_hpd_int_cntl_reg = mmDC_HPD1_INT_CONTROL; + break; + case AMDGPU_HPD_2: + dc_hpd_int_cntl_reg = mmDC_HPD2_INT_CONTROL; + break; + case AMDGPU_HPD_3: + dc_hpd_int_cntl_reg = mmDC_HPD3_INT_CONTROL; + break; + case AMDGPU_HPD_4: + dc_hpd_int_cntl_reg = mmDC_HPD4_INT_CONTROL; + break; + case AMDGPU_HPD_5: + dc_hpd_int_cntl_reg = mmDC_HPD5_INT_CONTROL; + break; + case AMDGPU_HPD_6: + dc_hpd_int_cntl_reg = mmDC_HPD6_INT_CONTROL; + break; + default: + DRM_DEBUG("invalid hpd %d\n", type); + return 0; + } + + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg); + dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK; + WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl); + break; + case AMDGPU_IRQ_STATE_ENABLE: + dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg); + dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK; + WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl); + break; + default: + break; + } + + return 0; +} + +static int dce_v8_0_set_crtc_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned type, + enum amdgpu_interrupt_state state) +{ + switch (type) { + case AMDGPU_CRTC_IRQ_VBLANK1: + dce_v8_0_set_crtc_vblank_interrupt_state(adev, 0, state); + break; + case AMDGPU_CRTC_IRQ_VBLANK2: + dce_v8_0_set_crtc_vblank_interrupt_state(adev, 1, state); + break; + case AMDGPU_CRTC_IRQ_VBLANK3: + dce_v8_0_set_crtc_vblank_interrupt_state(adev, 2, state); + break; + case AMDGPU_CRTC_IRQ_VBLANK4: + dce_v8_0_set_crtc_vblank_interrupt_state(adev, 3, state); + break; + case AMDGPU_CRTC_IRQ_VBLANK5: + 
dce_v8_0_set_crtc_vblank_interrupt_state(adev, 4, state); + break; + case AMDGPU_CRTC_IRQ_VBLANK6: + dce_v8_0_set_crtc_vblank_interrupt_state(adev, 5, state); + break; + case AMDGPU_CRTC_IRQ_VLINE1: + dce_v8_0_set_crtc_vline_interrupt_state(adev, 0, state); + break; + case AMDGPU_CRTC_IRQ_VLINE2: + dce_v8_0_set_crtc_vline_interrupt_state(adev, 1, state); + break; + case AMDGPU_CRTC_IRQ_VLINE3: + dce_v8_0_set_crtc_vline_interrupt_state(adev, 2, state); + break; + case AMDGPU_CRTC_IRQ_VLINE4: + dce_v8_0_set_crtc_vline_interrupt_state(adev, 3, state); + break; + case AMDGPU_CRTC_IRQ_VLINE5: + dce_v8_0_set_crtc_vline_interrupt_state(adev, 4, state); + break; + case AMDGPU_CRTC_IRQ_VLINE6: + dce_v8_0_set_crtc_vline_interrupt_state(adev, 5, state); + break; + default: + break; + } + return 0; +} + +static int dce_v8_0_crtc_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + unsigned crtc = entry->src_id - 1; + uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg); + unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc); + + switch (entry->src_data) { + case 0: /* vblank */ + if (disp_int & interrupt_status_offsets[crtc].vblank) { + WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], LB_VBLANK_STATUS__VBLANK_ACK_MASK); + if (amdgpu_irq_enabled(adev, source, irq_type)) { + drm_handle_vblank(adev->ddev, crtc); + } + DRM_DEBUG("IH: D%d vblank\n", crtc + 1); + } + break; + case 1: /* vline */ + if (disp_int & interrupt_status_offsets[crtc].vline) { + WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], LB_VLINE_STATUS__VLINE_ACK_MASK); + DRM_DEBUG("IH: D%d vline\n", crtc + 1); + } + break; + default: + DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data); + break; + } + + return 0; +} + +static int dce_v8_0_set_pageflip_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned type, + enum amdgpu_interrupt_state state) +{ + u32 reg, reg_block; + /* now deal with page flip IRQ */ + switch (type) { + case AMDGPU_PAGEFLIP_IRQ_D1: + reg_block = CRTC0_REGISTER_OFFSET; + break; + case AMDGPU_PAGEFLIP_IRQ_D2: + reg_block = CRTC1_REGISTER_OFFSET; + break; + case AMDGPU_PAGEFLIP_IRQ_D3: + reg_block = CRTC2_REGISTER_OFFSET; + break; + case AMDGPU_PAGEFLIP_IRQ_D4: + reg_block = CRTC3_REGISTER_OFFSET; + break; + case AMDGPU_PAGEFLIP_IRQ_D5: + reg_block = CRTC4_REGISTER_OFFSET; + break; + case AMDGPU_PAGEFLIP_IRQ_D6: + reg_block = CRTC5_REGISTER_OFFSET; + break; + default: + DRM_ERROR("invalid pageflip crtc %d\n", type); + return -EINVAL; + } + + reg = RREG32(mmGRPH_INTERRUPT_CONTROL + reg_block); + if (state == AMDGPU_IRQ_STATE_DISABLE) + WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); + else + WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); + + return 0; +} + +static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + int reg_block; + unsigned long flags; + unsigned crtc_id; + struct amdgpu_crtc *amdgpu_crtc; + struct amdgpu_flip_work *works; + + crtc_id = (entry->src_id - 8) >> 1; + amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; + + /* ack the interrupt */ + switch(crtc_id){ + case AMDGPU_PAGEFLIP_IRQ_D1: + reg_block = CRTC0_REGISTER_OFFSET; + break; + case AMDGPU_PAGEFLIP_IRQ_D2: + reg_block = CRTC1_REGISTER_OFFSET; + break; + case AMDGPU_PAGEFLIP_IRQ_D3: + reg_block = CRTC2_REGISTER_OFFSET; + break; + case 
AMDGPU_PAGEFLIP_IRQ_D4: + reg_block = CRTC3_REGISTER_OFFSET; + break; + case AMDGPU_PAGEFLIP_IRQ_D5: + reg_block = CRTC4_REGISTER_OFFSET; + break; + case AMDGPU_PAGEFLIP_IRQ_D6: + reg_block = CRTC5_REGISTER_OFFSET; + break; + default: + DRM_ERROR("invalid pageflip crtc %d\n", crtc_id); + return -EINVAL; + } + + if (RREG32(mmGRPH_INTERRUPT_STATUS + reg_block) & GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK) + WREG32(mmGRPH_INTERRUPT_STATUS + reg_block, GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK); + + /* IRQ could occur when in initial stage */ + if (amdgpu_crtc == NULL) + return 0; + + spin_lock_irqsave(&adev->ddev->event_lock, flags); + works = amdgpu_crtc->pflip_works; + if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){ + DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != " + "AMDGPU_FLIP_SUBMITTED(%d)\n", + amdgpu_crtc->pflip_status, + AMDGPU_FLIP_SUBMITTED); + spin_unlock_irqrestore(&adev->ddev->event_lock, flags); + return 0; + } + + /* page flip completed. clean up */ + amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE; + amdgpu_crtc->pflip_works = NULL; + + /* wake up userspace */ + if (works->event) + drm_send_vblank_event(adev->ddev, crtc_id, works->event); + + spin_unlock_irqrestore(&adev->ddev->event_lock, flags); + + drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); + amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id); + queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work); + + return 0; +} + +static int dce_v8_0_hpd_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + uint32_t disp_int, mask, int_control, tmp; + unsigned hpd; + + if (entry->src_data > 6) { + DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data); + return 0; + } + + hpd = entry->src_data; + disp_int = RREG32(interrupt_status_offsets[hpd].reg); + mask = interrupt_status_offsets[hpd].hpd; + int_control = hpd_int_control_offsets[hpd]; + + if (disp_int & mask) { + tmp = RREG32(int_control); + tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK; + WREG32(int_control, tmp); + schedule_work(&adev->hotplug_work); + DRM_DEBUG("IH: HPD%d\n", hpd + 1); + } + + return 0; + +} + +static int dce_v8_0_set_clockgating_state(struct amdgpu_device *adev, + enum amdgpu_clockgating_state state) +{ + return 0; +} + +static int dce_v8_0_set_powergating_state(struct amdgpu_device *adev, + enum amdgpu_powergating_state state) +{ + return 0; +} + +const struct amdgpu_ip_funcs dce_v8_0_ip_funcs = { + .early_init = dce_v8_0_early_init, + .late_init = NULL, + .sw_init = dce_v8_0_sw_init, + .sw_fini = dce_v8_0_sw_fini, + .hw_init = dce_v8_0_hw_init, + .hw_fini = dce_v8_0_hw_fini, + .suspend = dce_v8_0_suspend, + .resume = dce_v8_0_resume, + .is_idle = dce_v8_0_is_idle, + .wait_for_idle = dce_v8_0_wait_for_idle, + .soft_reset = dce_v8_0_soft_reset, + .print_status = dce_v8_0_print_status, + .set_clockgating_state = dce_v8_0_set_clockgating_state, + .set_powergating_state = dce_v8_0_set_powergating_state, +}; + +static void +dce_v8_0_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + + amdgpu_encoder->pixel_clock = adjusted_mode->clock; + + /* need to call this here rather than in prepare() since we need some crtc info */ + amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); + + /* set scaler clears this on some chips */ + dce_v8_0_set_interleave(encoder->crtc, mode); + + if 
(amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) { + dce_v8_0_afmt_enable(encoder, true); + dce_v8_0_afmt_setmode(encoder, adjusted_mode); + } +} + +static void dce_v8_0_encoder_prepare(struct drm_encoder *encoder) +{ + struct amdgpu_device *adev = encoder->dev->dev_private; + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); + + if ((amdgpu_encoder->active_device & + (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) || + (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) != + ENCODER_OBJECT_ID_NONE)) { + struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; + if (dig) { + dig->dig_encoder = dce_v8_0_pick_dig_encoder(encoder); + if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT) + dig->afmt = adev->mode_info.afmt[dig->dig_encoder]; + } + } + + amdgpu_atombios_scratch_regs_lock(adev, true); + + if (connector) { + struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); + + /* select the clock/data port if it uses a router */ + if (amdgpu_connector->router.cd_valid) + amdgpu_i2c_router_select_cd_port(amdgpu_connector); + + /* turn eDP panel on for mode set */ + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) + amdgpu_atombios_encoder_set_edp_panel_power(connector, + ATOM_TRANSMITTER_ACTION_POWER_ON); + } + + /* this is needed for the pll/ss setup to work correctly in some cases */ + amdgpu_atombios_encoder_set_crtc_source(encoder); + /* set up the FMT blocks */ + dce_v8_0_program_fmt(encoder); +} + +static void dce_v8_0_encoder_commit(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; + + /* need to call this here as we need the crtc set up */ + amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON); + amdgpu_atombios_scratch_regs_lock(adev, false); +} + +static void dce_v8_0_encoder_disable(struct drm_encoder *encoder) +{ + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct amdgpu_encoder_atom_dig *dig; + + amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); + + if (amdgpu_atombios_encoder_is_digital(encoder)) { + if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) + dce_v8_0_afmt_enable(encoder, false); + dig = amdgpu_encoder->enc_priv; + dig->dig_encoder = -1; + } + amdgpu_encoder->active_device = 0; +} + +/* these are handled by the primary encoders */ +static void dce_v8_0_ext_prepare(struct drm_encoder *encoder) +{ + +} + +static void dce_v8_0_ext_commit(struct drm_encoder *encoder) +{ + +} + +static void +dce_v8_0_ext_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + +} + +static void dce_v8_0_ext_disable(struct drm_encoder *encoder) +{ + +} + +static void +dce_v8_0_ext_dpms(struct drm_encoder *encoder, int mode) +{ + +} + +static bool dce_v8_0_ext_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +static const struct drm_encoder_helper_funcs dce_v8_0_ext_helper_funcs = { + .dpms = dce_v8_0_ext_dpms, + .mode_fixup = dce_v8_0_ext_mode_fixup, + .prepare = dce_v8_0_ext_prepare, + .mode_set = dce_v8_0_ext_mode_set, + .commit = dce_v8_0_ext_commit, + .disable = dce_v8_0_ext_disable, + /* no detect for TMDS/LVDS yet */ +}; + +static const struct drm_encoder_helper_funcs dce_v8_0_dig_helper_funcs = { + .dpms = 
amdgpu_atombios_encoder_dpms, + .mode_fixup = amdgpu_atombios_encoder_mode_fixup, + .prepare = dce_v8_0_encoder_prepare, + .mode_set = dce_v8_0_encoder_mode_set, + .commit = dce_v8_0_encoder_commit, + .disable = dce_v8_0_encoder_disable, + .detect = amdgpu_atombios_encoder_dig_detect, +}; + +static const struct drm_encoder_helper_funcs dce_v8_0_dac_helper_funcs = { + .dpms = amdgpu_atombios_encoder_dpms, + .mode_fixup = amdgpu_atombios_encoder_mode_fixup, + .prepare = dce_v8_0_encoder_prepare, + .mode_set = dce_v8_0_encoder_mode_set, + .commit = dce_v8_0_encoder_commit, + .detect = amdgpu_atombios_encoder_dac_detect, +}; + +static void dce_v8_0_encoder_destroy(struct drm_encoder *encoder) +{ + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) + amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder); + kfree(amdgpu_encoder->enc_priv); + drm_encoder_cleanup(encoder); + kfree(amdgpu_encoder); +} + +static const struct drm_encoder_funcs dce_v8_0_encoder_funcs = { + .destroy = dce_v8_0_encoder_destroy, +}; + +static void dce_v8_0_encoder_add(struct amdgpu_device *adev, + uint32_t encoder_enum, + uint32_t supported_device, + u16 caps) +{ + struct drm_device *dev = adev->ddev; + struct drm_encoder *encoder; + struct amdgpu_encoder *amdgpu_encoder; + + /* see if we already added it */ + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + amdgpu_encoder = to_amdgpu_encoder(encoder); + if (amdgpu_encoder->encoder_enum == encoder_enum) { + amdgpu_encoder->devices |= supported_device; + return; + } + + } + + /* add a new one */ + amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL); + if (!amdgpu_encoder) + return; + + encoder = &amdgpu_encoder->base; + switch (adev->mode_info.num_crtc) { + case 1: + encoder->possible_crtcs = 0x1; + break; + case 2: + default: + encoder->possible_crtcs = 0x3; + break; + case 4: + encoder->possible_crtcs = 0xf; + break; + case 6: + encoder->possible_crtcs = 0x3f; + break; + } + + amdgpu_encoder->enc_priv = NULL; + + amdgpu_encoder->encoder_enum = encoder_enum; + amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; + amdgpu_encoder->devices = supported_device; + amdgpu_encoder->rmx_type = RMX_OFF; + amdgpu_encoder->underscan_type = UNDERSCAN_OFF; + amdgpu_encoder->is_ext_encoder = false; + amdgpu_encoder->caps = caps; + + switch (amdgpu_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: + drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs, + DRM_MODE_ENCODER_DAC); + drm_encoder_helper_add(encoder, &dce_v8_0_dac_helper_funcs); + break; + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: + if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { + amdgpu_encoder->rmx_type = RMX_FULL; + drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs, + DRM_MODE_ENCODER_LVDS); + amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder); + } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) { + drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs, + DRM_MODE_ENCODER_DAC); + amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); + } else { + drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs, + DRM_MODE_ENCODER_TMDS); + amdgpu_encoder->enc_priv = 
amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); + } + drm_encoder_helper_add(encoder, &dce_v8_0_dig_helper_funcs); + break; + case ENCODER_OBJECT_ID_SI170B: + case ENCODER_OBJECT_ID_CH7303: + case ENCODER_OBJECT_ID_EXTERNAL_SDVOA: + case ENCODER_OBJECT_ID_EXTERNAL_SDVOB: + case ENCODER_OBJECT_ID_TITFP513: + case ENCODER_OBJECT_ID_VT1623: + case ENCODER_OBJECT_ID_HDMI_SI1930: + case ENCODER_OBJECT_ID_TRAVIS: + case ENCODER_OBJECT_ID_NUTMEG: + /* these are handled by the primary encoders */ + amdgpu_encoder->is_ext_encoder = true; + if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) + drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs, + DRM_MODE_ENCODER_LVDS); + else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) + drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs, + DRM_MODE_ENCODER_DAC); + else + drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs, + DRM_MODE_ENCODER_TMDS); + drm_encoder_helper_add(encoder, &dce_v8_0_ext_helper_funcs); + break; + } +} + +static const struct amdgpu_display_funcs dce_v8_0_display_funcs = { + .set_vga_render_state = &dce_v8_0_set_vga_render_state, + .bandwidth_update = &dce_v8_0_bandwidth_update, + .vblank_get_counter = &dce_v8_0_vblank_get_counter, + .vblank_wait = &dce_v8_0_vblank_wait, + .is_display_hung = &dce_v8_0_is_display_hung, + .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level, + .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level, + .hpd_sense = &dce_v8_0_hpd_sense, + .hpd_set_polarity = &dce_v8_0_hpd_set_polarity, + .hpd_get_gpio_reg = &dce_v8_0_hpd_get_gpio_reg, + .page_flip = &dce_v8_0_page_flip, + .page_flip_get_scanoutpos = &dce_v8_0_crtc_get_scanoutpos, + .add_encoder = &dce_v8_0_encoder_add, + .add_connector = &amdgpu_connector_add, + .stop_mc_access = &dce_v8_0_stop_mc_access, + .resume_mc_access = &dce_v8_0_resume_mc_access, +}; + +static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev) +{ + if (adev->mode_info.funcs == NULL) + adev->mode_info.funcs = &dce_v8_0_display_funcs; +} + +static const struct amdgpu_irq_src_funcs dce_v8_0_crtc_irq_funcs = { + .set = dce_v8_0_set_crtc_interrupt_state, + .process = dce_v8_0_crtc_irq, +}; + +static const struct amdgpu_irq_src_funcs dce_v8_0_pageflip_irq_funcs = { + .set = dce_v8_0_set_pageflip_interrupt_state, + .process = dce_v8_0_pageflip_irq, +}; + +static const struct amdgpu_irq_src_funcs dce_v8_0_hpd_irq_funcs = { + .set = dce_v8_0_set_hpd_interrupt_state, + .process = dce_v8_0_hpd_irq, +}; + +static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST; + adev->crtc_irq.funcs = &dce_v8_0_crtc_irq_funcs; + + adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST; + adev->pageflip_irq.funcs = &dce_v8_0_pageflip_irq_funcs; + + adev->hpd_irq.num_types = AMDGPU_HPD_LAST; + adev->hpd_irq.funcs = &dce_v8_0_hpd_irq_funcs; +} diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h new file mode 100644 index 000000000000..3a0a73b41041 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h @@ -0,0 +1,29 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __DCE_V8_0_H__ +#define __DCE_V8_0_H__ + +extern const struct amdgpu_ip_funcs dce_v8_0_ip_funcs; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c new file mode 100644 index 000000000000..675b096417f4 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -0,0 +1,5635 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include +#include "drmP.h" +#include "amdgpu.h" +#include "amdgpu_ih.h" +#include "amdgpu_gfx.h" +#include "cikd.h" +#include "cik.h" +#include "atom.h" +#include "amdgpu_ucode.h" +#include "clearstate_ci.h" + +#include "uvd/uvd_4_2_d.h" + +#include "dce/dce_8_0_d.h" +#include "dce/dce_8_0_sh_mask.h" + +#include "bif/bif_4_1_d.h" +#include "bif/bif_4_1_sh_mask.h" + +#include "gca/gfx_7_0_d.h" +#include "gca/gfx_7_2_enum.h" +#include "gca/gfx_7_2_sh_mask.h" + +#include "gmc/gmc_7_0_d.h" +#include "gmc/gmc_7_0_sh_mask.h" + +#include "oss/oss_2_0_d.h" +#include "oss/oss_2_0_sh_mask.h" + +#define GFX7_NUM_GFX_RINGS 1 +#define GFX7_NUM_COMPUTE_RINGS 8 + +static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev); +static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev); +static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev); +int gfx_v7_0_get_cu_info(struct amdgpu_device *, struct amdgpu_cu_info *); + +MODULE_FIRMWARE("radeon/bonaire_pfp.bin"); +MODULE_FIRMWARE("radeon/bonaire_me.bin"); +MODULE_FIRMWARE("radeon/bonaire_ce.bin"); +MODULE_FIRMWARE("radeon/bonaire_rlc.bin"); +MODULE_FIRMWARE("radeon/bonaire_mec.bin"); + +MODULE_FIRMWARE("radeon/hawaii_pfp.bin"); +MODULE_FIRMWARE("radeon/hawaii_me.bin"); +MODULE_FIRMWARE("radeon/hawaii_ce.bin"); +MODULE_FIRMWARE("radeon/hawaii_rlc.bin"); +MODULE_FIRMWARE("radeon/hawaii_mec.bin"); + +MODULE_FIRMWARE("radeon/kaveri_pfp.bin"); +MODULE_FIRMWARE("radeon/kaveri_me.bin"); +MODULE_FIRMWARE("radeon/kaveri_ce.bin"); +MODULE_FIRMWARE("radeon/kaveri_rlc.bin"); +MODULE_FIRMWARE("radeon/kaveri_mec.bin"); +MODULE_FIRMWARE("radeon/kaveri_mec2.bin"); + +MODULE_FIRMWARE("radeon/kabini_pfp.bin"); +MODULE_FIRMWARE("radeon/kabini_me.bin"); +MODULE_FIRMWARE("radeon/kabini_ce.bin"); +MODULE_FIRMWARE("radeon/kabini_rlc.bin"); +MODULE_FIRMWARE("radeon/kabini_mec.bin"); + +MODULE_FIRMWARE("radeon/mullins_pfp.bin"); +MODULE_FIRMWARE("radeon/mullins_me.bin"); +MODULE_FIRMWARE("radeon/mullins_ce.bin"); +MODULE_FIRMWARE("radeon/mullins_rlc.bin"); +MODULE_FIRMWARE("radeon/mullins_mec.bin"); + +static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] = +{ + {mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0}, + {mmGDS_VMID1_BASE, mmGDS_VMID1_SIZE, mmGDS_GWS_VMID1, mmGDS_OA_VMID1}, + {mmGDS_VMID2_BASE, mmGDS_VMID2_SIZE, mmGDS_GWS_VMID2, mmGDS_OA_VMID2}, + {mmGDS_VMID3_BASE, mmGDS_VMID3_SIZE, mmGDS_GWS_VMID3, mmGDS_OA_VMID3}, + {mmGDS_VMID4_BASE, mmGDS_VMID4_SIZE, mmGDS_GWS_VMID4, mmGDS_OA_VMID4}, + {mmGDS_VMID5_BASE, mmGDS_VMID5_SIZE, mmGDS_GWS_VMID5, mmGDS_OA_VMID5}, + {mmGDS_VMID6_BASE, mmGDS_VMID6_SIZE, mmGDS_GWS_VMID6, mmGDS_OA_VMID6}, + {mmGDS_VMID7_BASE, mmGDS_VMID7_SIZE, mmGDS_GWS_VMID7, mmGDS_OA_VMID7}, + {mmGDS_VMID8_BASE, mmGDS_VMID8_SIZE, mmGDS_GWS_VMID8, mmGDS_OA_VMID8}, + {mmGDS_VMID9_BASE, mmGDS_VMID9_SIZE, mmGDS_GWS_VMID9, mmGDS_OA_VMID9}, + {mmGDS_VMID10_BASE, mmGDS_VMID10_SIZE, mmGDS_GWS_VMID10, mmGDS_OA_VMID10}, + {mmGDS_VMID11_BASE, mmGDS_VMID11_SIZE, mmGDS_GWS_VMID11, mmGDS_OA_VMID11}, + {mmGDS_VMID12_BASE, mmGDS_VMID12_SIZE, mmGDS_GWS_VMID12, mmGDS_OA_VMID12}, + {mmGDS_VMID13_BASE, mmGDS_VMID13_SIZE, mmGDS_GWS_VMID13, mmGDS_OA_VMID13}, + {mmGDS_VMID14_BASE, mmGDS_VMID14_SIZE, mmGDS_GWS_VMID14, mmGDS_OA_VMID14}, + {mmGDS_VMID15_BASE, mmGDS_VMID15_SIZE, mmGDS_GWS_VMID15, mmGDS_OA_VMID15} +}; + +static const u32 spectre_rlc_save_restore_register_list[] = +{ + (0x0e00 << 16) | (0xc12c >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc140 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc150 >> 2), + 0x00000000, + 
(0x0e00 << 16) | (0xc15c >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc168 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc170 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc178 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc204 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc2b4 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc2b8 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc2bc >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc2c0 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8228 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x829c >> 2), + 0x00000000, + (0x0e00 << 16) | (0x869c >> 2), + 0x00000000, + (0x0600 << 16) | (0x98f4 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x98f8 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x9900 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc260 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x90e8 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x3c000 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x3c00c >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8c1c >> 2), + 0x00000000, + (0x0e00 << 16) | (0x9700 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xcd20 >> 2), + 0x00000000, + (0x4e00 << 16) | (0xcd20 >> 2), + 0x00000000, + (0x5e00 << 16) | (0xcd20 >> 2), + 0x00000000, + (0x6e00 << 16) | (0xcd20 >> 2), + 0x00000000, + (0x7e00 << 16) | (0xcd20 >> 2), + 0x00000000, + (0x8e00 << 16) | (0xcd20 >> 2), + 0x00000000, + (0x9e00 << 16) | (0xcd20 >> 2), + 0x00000000, + (0xae00 << 16) | (0xcd20 >> 2), + 0x00000000, + (0xbe00 << 16) | (0xcd20 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x89bc >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8900 >> 2), + 0x00000000, + 0x3, + (0x0e00 << 16) | (0xc130 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc134 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc1fc >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc208 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc264 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc268 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc26c >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc270 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc274 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc278 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc27c >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc280 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc284 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc288 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc28c >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc290 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc294 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc298 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc29c >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc2a0 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc2a4 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc2a8 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc2ac >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc2b0 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x301d0 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x30238 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x30250 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x30254 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x30258 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x3025c >> 2), + 0x00000000, + (0x4e00 << 16) | (0xc900 >> 2), + 0x00000000, + (0x5e00 << 16) | (0xc900 >> 2), + 0x00000000, + (0x6e00 << 16) | (0xc900 >> 2), + 0x00000000, + (0x7e00 << 16) | (0xc900 >> 2), + 0x00000000, + (0x8e00 << 16) | (0xc900 >> 2), + 0x00000000, + (0x9e00 << 16) | (0xc900 >> 2), + 0x00000000, + (0xae00 << 16) | (0xc900 >> 2), + 0x00000000, + (0xbe00 << 16) | (0xc900 >> 2), + 0x00000000, + (0x4e00 << 16) | (0xc904 >> 2), + 0x00000000, + (0x5e00 << 16) | (0xc904 >> 2), + 0x00000000, + (0x6e00 << 16) | (0xc904 >> 2), + 0x00000000, + (0x7e00 << 16) | (0xc904 >> 2), + 
0x00000000, + (0x8e00 << 16) | (0xc904 >> 2), + 0x00000000, + (0x9e00 << 16) | (0xc904 >> 2), + 0x00000000, + (0xae00 << 16) | (0xc904 >> 2), + 0x00000000, + (0xbe00 << 16) | (0xc904 >> 2), + 0x00000000, + (0x4e00 << 16) | (0xc908 >> 2), + 0x00000000, + (0x5e00 << 16) | (0xc908 >> 2), + 0x00000000, + (0x6e00 << 16) | (0xc908 >> 2), + 0x00000000, + (0x7e00 << 16) | (0xc908 >> 2), + 0x00000000, + (0x8e00 << 16) | (0xc908 >> 2), + 0x00000000, + (0x9e00 << 16) | (0xc908 >> 2), + 0x00000000, + (0xae00 << 16) | (0xc908 >> 2), + 0x00000000, + (0xbe00 << 16) | (0xc908 >> 2), + 0x00000000, + (0x4e00 << 16) | (0xc90c >> 2), + 0x00000000, + (0x5e00 << 16) | (0xc90c >> 2), + 0x00000000, + (0x6e00 << 16) | (0xc90c >> 2), + 0x00000000, + (0x7e00 << 16) | (0xc90c >> 2), + 0x00000000, + (0x8e00 << 16) | (0xc90c >> 2), + 0x00000000, + (0x9e00 << 16) | (0xc90c >> 2), + 0x00000000, + (0xae00 << 16) | (0xc90c >> 2), + 0x00000000, + (0xbe00 << 16) | (0xc90c >> 2), + 0x00000000, + (0x4e00 << 16) | (0xc910 >> 2), + 0x00000000, + (0x5e00 << 16) | (0xc910 >> 2), + 0x00000000, + (0x6e00 << 16) | (0xc910 >> 2), + 0x00000000, + (0x7e00 << 16) | (0xc910 >> 2), + 0x00000000, + (0x8e00 << 16) | (0xc910 >> 2), + 0x00000000, + (0x9e00 << 16) | (0xc910 >> 2), + 0x00000000, + (0xae00 << 16) | (0xc910 >> 2), + 0x00000000, + (0xbe00 << 16) | (0xc910 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc99c >> 2), + 0x00000000, + (0x0e00 << 16) | (0x9834 >> 2), + 0x00000000, + (0x0000 << 16) | (0x30f00 >> 2), + 0x00000000, + (0x0001 << 16) | (0x30f00 >> 2), + 0x00000000, + (0x0000 << 16) | (0x30f04 >> 2), + 0x00000000, + (0x0001 << 16) | (0x30f04 >> 2), + 0x00000000, + (0x0000 << 16) | (0x30f08 >> 2), + 0x00000000, + (0x0001 << 16) | (0x30f08 >> 2), + 0x00000000, + (0x0000 << 16) | (0x30f0c >> 2), + 0x00000000, + (0x0001 << 16) | (0x30f0c >> 2), + 0x00000000, + (0x0600 << 16) | (0x9b7c >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8a14 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8a18 >> 2), + 0x00000000, + (0x0600 << 16) | (0x30a00 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8bf0 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8bcc >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8b24 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x30a04 >> 2), + 0x00000000, + (0x0600 << 16) | (0x30a10 >> 2), + 0x00000000, + (0x0600 << 16) | (0x30a14 >> 2), + 0x00000000, + (0x0600 << 16) | (0x30a18 >> 2), + 0x00000000, + (0x0600 << 16) | (0x30a2c >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc700 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc704 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc708 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc768 >> 2), + 0x00000000, + (0x0400 << 16) | (0xc770 >> 2), + 0x00000000, + (0x0400 << 16) | (0xc774 >> 2), + 0x00000000, + (0x0400 << 16) | (0xc778 >> 2), + 0x00000000, + (0x0400 << 16) | (0xc77c >> 2), + 0x00000000, + (0x0400 << 16) | (0xc780 >> 2), + 0x00000000, + (0x0400 << 16) | (0xc784 >> 2), + 0x00000000, + (0x0400 << 16) | (0xc788 >> 2), + 0x00000000, + (0x0400 << 16) | (0xc78c >> 2), + 0x00000000, + (0x0400 << 16) | (0xc798 >> 2), + 0x00000000, + (0x0400 << 16) | (0xc79c >> 2), + 0x00000000, + (0x0400 << 16) | (0xc7a0 >> 2), + 0x00000000, + (0x0400 << 16) | (0xc7a4 >> 2), + 0x00000000, + (0x0400 << 16) | (0xc7a8 >> 2), + 0x00000000, + (0x0400 << 16) | (0xc7ac >> 2), + 0x00000000, + (0x0400 << 16) | (0xc7b0 >> 2), + 0x00000000, + (0x0400 << 16) | (0xc7b4 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x9100 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x3c010 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x92a8 >> 2), + 0x00000000, + (0x0e00 << 16) | 
(0x92ac >> 2), + 0x00000000, + (0x0e00 << 16) | (0x92b4 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x92b8 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x92bc >> 2), + 0x00000000, + (0x0e00 << 16) | (0x92c0 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x92c4 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x92c8 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x92cc >> 2), + 0x00000000, + (0x0e00 << 16) | (0x92d0 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8c00 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8c04 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8c20 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8c38 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8c3c >> 2), + 0x00000000, + (0x0e00 << 16) | (0xae00 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x9604 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac08 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac0c >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac10 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac14 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac58 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac68 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac6c >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac70 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac74 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac78 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac7c >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac80 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac84 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac88 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac8c >> 2), + 0x00000000, + (0x0e00 << 16) | (0x970c >> 2), + 0x00000000, + (0x0e00 << 16) | (0x9714 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x9718 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x971c >> 2), + 0x00000000, + (0x0e00 << 16) | (0x31068 >> 2), + 0x00000000, + (0x4e00 << 16) | (0x31068 >> 2), + 0x00000000, + (0x5e00 << 16) | (0x31068 >> 2), + 0x00000000, + (0x6e00 << 16) | (0x31068 >> 2), + 0x00000000, + (0x7e00 << 16) | (0x31068 >> 2), + 0x00000000, + (0x8e00 << 16) | (0x31068 >> 2), + 0x00000000, + (0x9e00 << 16) | (0x31068 >> 2), + 0x00000000, + (0xae00 << 16) | (0x31068 >> 2), + 0x00000000, + (0xbe00 << 16) | (0x31068 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xcd10 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xcd14 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x88b0 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x88b4 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x88b8 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x88bc >> 2), + 0x00000000, + (0x0400 << 16) | (0x89c0 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x88c4 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x88c8 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x88d0 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x88d4 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x88d8 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8980 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x30938 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x3093c >> 2), + 0x00000000, + (0x0e00 << 16) | (0x30940 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x89a0 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x30900 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x30904 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x89b4 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x3c210 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x3c214 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x3c218 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8904 >> 2), + 0x00000000, + 0x5, + (0x0e00 << 16) | (0x8c28 >> 2), + (0x0e00 << 16) | (0x8c2c >> 2), + (0x0e00 << 16) | (0x8c30 >> 2), + (0x0e00 << 16) | (0x8c34 >> 2), + (0x0e00 << 16) | (0x9600 >> 2), +}; + +static const u32 kalindi_rlc_save_restore_register_list[] = +{ + (0x0e00 << 16) | (0xc12c >> 2), + 
0x00000000, + (0x0e00 << 16) | (0xc140 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc150 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc15c >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc168 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc170 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc204 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc2b4 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc2b8 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc2bc >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc2c0 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8228 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x829c >> 2), + 0x00000000, + (0x0e00 << 16) | (0x869c >> 2), + 0x00000000, + (0x0600 << 16) | (0x98f4 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x98f8 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x9900 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc260 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x90e8 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x3c000 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x3c00c >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8c1c >> 2), + 0x00000000, + (0x0e00 << 16) | (0x9700 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xcd20 >> 2), + 0x00000000, + (0x4e00 << 16) | (0xcd20 >> 2), + 0x00000000, + (0x5e00 << 16) | (0xcd20 >> 2), + 0x00000000, + (0x6e00 << 16) | (0xcd20 >> 2), + 0x00000000, + (0x7e00 << 16) | (0xcd20 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x89bc >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8900 >> 2), + 0x00000000, + 0x3, + (0x0e00 << 16) | (0xc130 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc134 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc1fc >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc208 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc264 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc268 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc26c >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc270 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc274 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc28c >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc290 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc294 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc298 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc2a0 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc2a4 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc2a8 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc2ac >> 2), + 0x00000000, + (0x0e00 << 16) | (0x301d0 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x30238 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x30250 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x30254 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x30258 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x3025c >> 2), + 0x00000000, + (0x4e00 << 16) | (0xc900 >> 2), + 0x00000000, + (0x5e00 << 16) | (0xc900 >> 2), + 0x00000000, + (0x6e00 << 16) | (0xc900 >> 2), + 0x00000000, + (0x7e00 << 16) | (0xc900 >> 2), + 0x00000000, + (0x4e00 << 16) | (0xc904 >> 2), + 0x00000000, + (0x5e00 << 16) | (0xc904 >> 2), + 0x00000000, + (0x6e00 << 16) | (0xc904 >> 2), + 0x00000000, + (0x7e00 << 16) | (0xc904 >> 2), + 0x00000000, + (0x4e00 << 16) | (0xc908 >> 2), + 0x00000000, + (0x5e00 << 16) | (0xc908 >> 2), + 0x00000000, + (0x6e00 << 16) | (0xc908 >> 2), + 0x00000000, + (0x7e00 << 16) | (0xc908 >> 2), + 0x00000000, + (0x4e00 << 16) | (0xc90c >> 2), + 0x00000000, + (0x5e00 << 16) | (0xc90c >> 2), + 0x00000000, + (0x6e00 << 16) | (0xc90c >> 2), + 0x00000000, + (0x7e00 << 16) | (0xc90c >> 2), + 0x00000000, + (0x4e00 << 16) | (0xc910 >> 2), + 0x00000000, + (0x5e00 << 16) | (0xc910 >> 2), + 0x00000000, + (0x6e00 << 16) | (0xc910 >> 2), + 0x00000000, + (0x7e00 << 16) | (0xc910 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc99c >> 2), + 0x00000000, + (0x0e00 << 16) | 
(0x9834 >> 2), + 0x00000000, + (0x0000 << 16) | (0x30f00 >> 2), + 0x00000000, + (0x0000 << 16) | (0x30f04 >> 2), + 0x00000000, + (0x0000 << 16) | (0x30f08 >> 2), + 0x00000000, + (0x0000 << 16) | (0x30f0c >> 2), + 0x00000000, + (0x0600 << 16) | (0x9b7c >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8a14 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8a18 >> 2), + 0x00000000, + (0x0600 << 16) | (0x30a00 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8bf0 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8bcc >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8b24 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x30a04 >> 2), + 0x00000000, + (0x0600 << 16) | (0x30a10 >> 2), + 0x00000000, + (0x0600 << 16) | (0x30a14 >> 2), + 0x00000000, + (0x0600 << 16) | (0x30a18 >> 2), + 0x00000000, + (0x0600 << 16) | (0x30a2c >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc700 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc704 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc708 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xc768 >> 2), + 0x00000000, + (0x0400 << 16) | (0xc770 >> 2), + 0x00000000, + (0x0400 << 16) | (0xc774 >> 2), + 0x00000000, + (0x0400 << 16) | (0xc798 >> 2), + 0x00000000, + (0x0400 << 16) | (0xc79c >> 2), + 0x00000000, + (0x0e00 << 16) | (0x9100 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x3c010 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8c00 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8c04 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8c20 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8c38 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8c3c >> 2), + 0x00000000, + (0x0e00 << 16) | (0xae00 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x9604 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac08 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac0c >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac10 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac14 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac58 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac68 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac6c >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac70 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac74 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac78 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac7c >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac80 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac84 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac88 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xac8c >> 2), + 0x00000000, + (0x0e00 << 16) | (0x970c >> 2), + 0x00000000, + (0x0e00 << 16) | (0x9714 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x9718 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x971c >> 2), + 0x00000000, + (0x0e00 << 16) | (0x31068 >> 2), + 0x00000000, + (0x4e00 << 16) | (0x31068 >> 2), + 0x00000000, + (0x5e00 << 16) | (0x31068 >> 2), + 0x00000000, + (0x6e00 << 16) | (0x31068 >> 2), + 0x00000000, + (0x7e00 << 16) | (0x31068 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xcd10 >> 2), + 0x00000000, + (0x0e00 << 16) | (0xcd14 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x88b0 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x88b4 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x88b8 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x88bc >> 2), + 0x00000000, + (0x0400 << 16) | (0x89c0 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x88c4 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x88c8 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x88d0 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x88d4 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x88d8 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8980 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x30938 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x3093c >> 2), + 0x00000000, + (0x0e00 << 16) | (0x30940 >> 2), + 0x00000000, + 
(0x0e00 << 16) | (0x89a0 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x30900 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x30904 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x89b4 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x3e1fc >> 2), + 0x00000000, + (0x0e00 << 16) | (0x3c210 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x3c214 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x3c218 >> 2), + 0x00000000, + (0x0e00 << 16) | (0x8904 >> 2), + 0x00000000, + 0x5, + (0x0e00 << 16) | (0x8c28 >> 2), + (0x0e00 << 16) | (0x8c2c >> 2), + (0x0e00 << 16) | (0x8c30 >> 2), + (0x0e00 << 16) | (0x8c34 >> 2), + (0x0e00 << 16) | (0x9600 >> 2), +}; + +static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev); +static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer); +static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev); +static void gfx_v7_0_init_pg(struct amdgpu_device *adev); + +/* + * Core functions + */ +/** + * gfx_v7_0_init_microcode - load ucode images from disk + * + * @adev: amdgpu_device pointer + * + * Use the firmware interface to load the ucode images into + * the driver (not loaded into hw). + * Returns 0 on success, error on failure. + */ +static int gfx_v7_0_init_microcode(struct amdgpu_device *adev) +{ + const char *chip_name; + char fw_name[30]; + int err; + + DRM_DEBUG("\n"); + + switch (adev->asic_type) { + case CHIP_BONAIRE: + chip_name = "bonaire"; + break; + case CHIP_HAWAII: + chip_name = "hawaii"; + break; + case CHIP_KAVERI: + chip_name = "kaveri"; + break; + case CHIP_KABINI: + chip_name = "kabini"; + break; + case CHIP_MULLINS: + chip_name = "mullins"; + break; + default: BUG(); + } + + snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); + err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev); + if (err) + goto out; + err = amdgpu_ucode_validate(adev->gfx.pfp_fw); + if (err) + goto out; + + snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); + err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev); + if (err) + goto out; + err = amdgpu_ucode_validate(adev->gfx.me_fw); + if (err) + goto out; + + snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name); + err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev); + if (err) + goto out; + err = amdgpu_ucode_validate(adev->gfx.ce_fw); + if (err) + goto out; + + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name); + err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev); + if (err) + goto out; + err = amdgpu_ucode_validate(adev->gfx.mec_fw); + if (err) + goto out; + + if (adev->asic_type == CHIP_KAVERI) { + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec2.bin", chip_name); + err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev); + if (err) + goto out; + err = amdgpu_ucode_validate(adev->gfx.mec2_fw); + if (err) + goto out; + } + + snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name); + err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev); + if (err) + goto out; + err = amdgpu_ucode_validate(adev->gfx.rlc_fw); + +out: + if (err) { + printk(KERN_ERR + "gfx7: Failed to load firmware \"%s\"\n", + fw_name); + release_firmware(adev->gfx.pfp_fw); + adev->gfx.pfp_fw = NULL; + release_firmware(adev->gfx.me_fw); + adev->gfx.me_fw = NULL; + release_firmware(adev->gfx.ce_fw); + adev->gfx.ce_fw = NULL; + release_firmware(adev->gfx.mec_fw); + adev->gfx.mec_fw = NULL; + release_firmware(adev->gfx.mec2_fw); + adev->gfx.mec2_fw = NULL; + release_firmware(adev->gfx.rlc_fw); + adev->gfx.rlc_fw = NULL; + } + return err; 
+} + +/** + * gfx_v7_0_tiling_mode_table_init - init the hw tiling table + * + * @adev: amdgpu_device pointer + * + * Starting with SI, the tiling setup is done globally in a + * set of 32 tiling modes. Rather than selecting each set of + * parameters per surface as on older asics, we just select + * which index in the tiling table we want to use, and the + * surface uses those parameters (CIK). + */ +static void gfx_v7_0_tiling_mode_table_init(struct amdgpu_device *adev) +{ + const u32 num_tile_mode_states = 32; + const u32 num_secondary_tile_mode_states = 16; + u32 reg_offset, gb_tile_moden, split_equal_to_row_size; + + switch (adev->gfx.config.mem_row_size_in_kb) { + case 1: + split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB; + break; + case 2: + default: + split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB; + break; + case 4: + split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB; + break; + } + + switch (adev->asic_type) { + case CHIP_BONAIRE: + for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { + switch (reg_offset) { + case 0: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 1: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 2: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 3: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 4: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | + TILE_SPLIT(split_equal_to_row_size)); + break; + case 5: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 6: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | + TILE_SPLIT(split_equal_to_row_size)); + break; + case 7: + gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size)); + break; + + case 8: + gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | + PIPE_CONFIG(ADDR_SURF_P4_16x16)); + break; + case 9: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING)); + break; + case 10: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 11: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + break; + case 12: + gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size)); + break; + case 13: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); + break; + case 14: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + 
PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 15: + gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 16: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + break; + case 17: + gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size)); + break; + case 18: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 19: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); + break; + case 20: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 21: + gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 22: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 23: + gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size)); + break; + case 24: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 25: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 26: + gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 27: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING)); + break; + case 28: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 29: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + break; + case 30: + gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size)); + break; + default: + gb_tile_moden = 0; + break; + } + adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden; + WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden); + } + for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) { + switch (reg_offset) { + case 0: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 1: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) 
| + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 2: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 3: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 4: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 5: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 6: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_4_BANK)); + break; + case 8: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 9: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 10: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 11: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 12: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 13: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 14: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_4_BANK)); + break; + default: + gb_tile_moden = 0; + break; + } + adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden; + WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden); + } + break; + case CHIP_HAWAII: + for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { + switch (reg_offset) { + case 0: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 1: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 2: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 3: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | + 
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 4: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | + TILE_SPLIT(split_equal_to_row_size)); + break; + case 5: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | + TILE_SPLIT(split_equal_to_row_size)); + break; + case 6: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | + TILE_SPLIT(split_equal_to_row_size)); + break; + case 7: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | + TILE_SPLIT(split_equal_to_row_size)); + break; + + case 8: + gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16)); + break; + case 9: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING)); + break; + case 10: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 11: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + break; + case 12: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + break; + case 13: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); + break; + case 14: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 15: + gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 16: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + break; + case 17: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + break; + case 18: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 19: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING)); + break; + case 20: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 21: + gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 22: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 23: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 24: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 25: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 26: + gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 27: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING)); + break; + case 28: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 29: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + break; + case 30: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + break; + default: + gb_tile_moden = 0; + break; + } + adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden; + WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden); + } + for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) { + switch (reg_offset) { + case 0: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 1: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 2: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 3: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 4: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 5: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_4_BANK)); + break; + case 6: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + 
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_4_BANK)); + break; + case 8: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 9: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 10: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 11: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 12: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 13: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 14: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_4_BANK)); + break; + default: + gb_tile_moden = 0; + break; + } + adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden; + WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden); + } + break; + case CHIP_KABINI: + case CHIP_KAVERI: + case CHIP_MULLINS: + default: + for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { + switch (reg_offset) { + case 0: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 1: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 2: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 3: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 4: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | + TILE_SPLIT(split_equal_to_row_size)); + break; + case 5: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 6: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | + TILE_SPLIT(split_equal_to_row_size)); + break; + case 7: + gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size)); + break; + + case 8: + gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | + PIPE_CONFIG(ADDR_SURF_P2)); + break; + case 9: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING)); + break; + case 10: + gb_tile_moden = 
(ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 11: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + break; + case 12: + gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size)); + break; + case 13: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); + break; + case 14: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 15: + gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 16: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + break; + case 17: + gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size)); + break; + case 18: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 19: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING)); + break; + case 20: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 21: + gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 22: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 23: + gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size)); + break; + case 24: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 25: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 26: + gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 27: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING)); + break; + case 28: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 29: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + break; + case 30: + gb_tile_moden = 
(TILE_SPLIT(split_equal_to_row_size)); + break; + default: + gb_tile_moden = 0; + break; + } + adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden; + WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden); + } + for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) { + switch (reg_offset) { + case 0: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 1: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 2: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 3: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 4: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 5: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 6: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 8: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 9: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 10: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 11: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 12: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 13: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 14: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + default: + gb_tile_moden = 0; + break; + } + adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden; + WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden); + } + break; + } +} + +/** + * gfx_v7_0_select_se_sh - select which SE, SH to address + * + * @adev: amdgpu_device pointer + * @se_num: shader engine to address + * @sh_num: sh block to address + * + * Select which SE, SH combinations to address. Certain + * registers are instanced per SE or SH. 
0xffffffff means + * broadcast to all SEs or SHs (CIK). + */ +void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num) +{ + u32 data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK; + + if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) + data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | + GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK; + else if (se_num == 0xffffffff) + data |= GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK | + (sh_num << GRBM_GFX_INDEX__SH_INDEX__SHIFT); + else if (sh_num == 0xffffffff) + data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | + (se_num << GRBM_GFX_INDEX__SE_INDEX__SHIFT); + else + data |= (sh_num << GRBM_GFX_INDEX__SH_INDEX__SHIFT) | + (se_num << GRBM_GFX_INDEX__SE_INDEX__SHIFT); + WREG32(mmGRBM_GFX_INDEX, data); +} + +/** + * gfx_v7_0_create_bitmask - create a bitmask + * + * @bit_width: length of the mask + * + * create a variable length bit mask (CIK). + * Returns the bitmask. + */ +static u32 gfx_v7_0_create_bitmask(u32 bit_width) +{ + u32 i, mask = 0; + + for (i = 0; i < bit_width; i++) { + mask <<= 1; + mask |= 1; + } + return mask; +} + +/** + * gfx_v7_0_get_rb_disabled - computes the mask of disabled RBs + * + * @adev: amdgpu_device pointer + * @max_rb_num: max RBs (render backends) for the asic + * @se_num: number of SEs (shader engines) for the asic + * @sh_per_se: number of SH blocks per SE for the asic + * + * Calculates the bitmask of disabled RBs (CIK). + * Returns the disabled RB bitmask. + */ +static u32 gfx_v7_0_get_rb_disabled(struct amdgpu_device *adev, + u32 max_rb_num_per_se, + u32 sh_per_se) +{ + u32 data, mask; + + data = RREG32(mmCC_RB_BACKEND_DISABLE); + if (data & 1) + data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK; + else + data = 0; + + data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE); + + data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT; + + mask = gfx_v7_0_create_bitmask(max_rb_num_per_se / sh_per_se); + + return data & mask; +} + +/** + * gfx_v7_0_setup_rb - setup the RBs on the asic + * + * @adev: amdgpu_device pointer + * @se_num: number of SEs (shader engines) for the asic + * @sh_per_se: number of SH blocks per SE for the asic + * @max_rb_num: max RBs (render backends) for the asic + * + * Configures per-SE/SH RB registers (CIK). 
+ */ +static void gfx_v7_0_setup_rb(struct amdgpu_device *adev, + u32 se_num, u32 sh_per_se, + u32 max_rb_num_per_se) +{ + int i, j; + u32 data, mask; + u32 disabled_rbs = 0; + u32 enabled_rbs = 0; + + mutex_lock(&adev->grbm_idx_mutex); + for (i = 0; i < se_num; i++) { + for (j = 0; j < sh_per_se; j++) { + gfx_v7_0_select_se_sh(adev, i, j); + data = gfx_v7_0_get_rb_disabled(adev, max_rb_num_per_se, sh_per_se); + if (adev->asic_type == CHIP_HAWAII) + disabled_rbs |= data << ((i * sh_per_se + j) * HAWAII_RB_BITMAP_WIDTH_PER_SH); + else + disabled_rbs |= data << ((i * sh_per_se + j) * CIK_RB_BITMAP_WIDTH_PER_SH); + } + } + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); + + mask = 1; + for (i = 0; i < max_rb_num_per_se * se_num; i++) { + if (!(disabled_rbs & mask)) + enabled_rbs |= mask; + mask <<= 1; + } + + adev->gfx.config.backend_enable_mask = enabled_rbs; + + mutex_lock(&adev->grbm_idx_mutex); + for (i = 0; i < se_num; i++) { + gfx_v7_0_select_se_sh(adev, i, 0xffffffff); + data = 0; + for (j = 0; j < sh_per_se; j++) { + switch (enabled_rbs & 3) { + case 0: + if (j == 0) + data |= (RASTER_CONFIG_RB_MAP_3 << + PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT); + else + data |= (RASTER_CONFIG_RB_MAP_0 << + PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT); + break; + case 1: + data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2); + break; + case 2: + data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2); + break; + case 3: + default: + data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2); + break; + } + enabled_rbs >>= 2; + } + WREG32(mmPA_SC_RASTER_CONFIG, data); + } + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); +} + +/** + * gfx_v7_0_gpu_init - setup the 3D engine + * + * @adev: amdgpu_device pointer + * + * Configures the 3D engine and tiling configuration + * registers so that the 3D engine is usable. 
+ */ +static void gfx_v7_0_gpu_init(struct amdgpu_device *adev) +{ + u32 gb_addr_config; + u32 mc_shared_chmap, mc_arb_ramcfg; + u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map; + u32 tmp; + int i; + + switch (adev->asic_type) { + case CHIP_BONAIRE: + adev->gfx.config.max_shader_engines = 2; + adev->gfx.config.max_tile_pipes = 4; + adev->gfx.config.max_cu_per_sh = 7; + adev->gfx.config.max_sh_per_se = 1; + adev->gfx.config.max_backends_per_se = 2; + adev->gfx.config.max_texture_channel_caches = 4; + adev->gfx.config.max_gprs = 256; + adev->gfx.config.max_gs_threads = 32; + adev->gfx.config.max_hw_contexts = 8; + + adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; + adev->gfx.config.sc_prim_fifo_size_backend = 0x100; + adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; + adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; + gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN; + break; + case CHIP_HAWAII: + adev->gfx.config.max_shader_engines = 4; + adev->gfx.config.max_tile_pipes = 16; + adev->gfx.config.max_cu_per_sh = 11; + adev->gfx.config.max_sh_per_se = 1; + adev->gfx.config.max_backends_per_se = 4; + adev->gfx.config.max_texture_channel_caches = 16; + adev->gfx.config.max_gprs = 256; + adev->gfx.config.max_gs_threads = 32; + adev->gfx.config.max_hw_contexts = 8; + + adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; + adev->gfx.config.sc_prim_fifo_size_backend = 0x100; + adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; + adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; + gb_addr_config = HAWAII_GB_ADDR_CONFIG_GOLDEN; + break; + case CHIP_KAVERI: + adev->gfx.config.max_shader_engines = 1; + adev->gfx.config.max_tile_pipes = 4; + if ((adev->pdev->device == 0x1304) || + (adev->pdev->device == 0x1305) || + (adev->pdev->device == 0x130C) || + (adev->pdev->device == 0x130F) || + (adev->pdev->device == 0x1310) || + (adev->pdev->device == 0x1311) || + (adev->pdev->device == 0x131C)) { + adev->gfx.config.max_cu_per_sh = 8; + adev->gfx.config.max_backends_per_se = 2; + } else if ((adev->pdev->device == 0x1309) || + (adev->pdev->device == 0x130A) || + (adev->pdev->device == 0x130D) || + (adev->pdev->device == 0x1313) || + (adev->pdev->device == 0x131D)) { + adev->gfx.config.max_cu_per_sh = 6; + adev->gfx.config.max_backends_per_se = 2; + } else if ((adev->pdev->device == 0x1306) || + (adev->pdev->device == 0x1307) || + (adev->pdev->device == 0x130B) || + (adev->pdev->device == 0x130E) || + (adev->pdev->device == 0x1315) || + (adev->pdev->device == 0x131B)) { + adev->gfx.config.max_cu_per_sh = 4; + adev->gfx.config.max_backends_per_se = 1; + } else { + adev->gfx.config.max_cu_per_sh = 3; + adev->gfx.config.max_backends_per_se = 1; + } + adev->gfx.config.max_sh_per_se = 1; + adev->gfx.config.max_texture_channel_caches = 4; + adev->gfx.config.max_gprs = 256; + adev->gfx.config.max_gs_threads = 16; + adev->gfx.config.max_hw_contexts = 8; + + adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; + adev->gfx.config.sc_prim_fifo_size_backend = 0x100; + adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; + adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; + gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN; + break; + case CHIP_KABINI: + case CHIP_MULLINS: + default: + adev->gfx.config.max_shader_engines = 1; + adev->gfx.config.max_tile_pipes = 2; + adev->gfx.config.max_cu_per_sh = 2; + adev->gfx.config.max_sh_per_se = 1; + adev->gfx.config.max_backends_per_se = 1; + adev->gfx.config.max_texture_channel_caches = 2; + adev->gfx.config.max_gprs = 256; + adev->gfx.config.max_gs_threads = 16; + 
adev->gfx.config.max_hw_contexts = 8;
+
+ adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+ adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+ adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+ adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
+ break;
+ }
+
+ WREG32(mmGRBM_CNTL, (0xff << GRBM_CNTL__READ_TIMEOUT__SHIFT));
+
+ mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP);
+ adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
+ mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
+
+ adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
+ adev->gfx.config.mem_max_burst_length_bytes = 256;
+ if (adev->flags & AMDGPU_IS_APU) {
+ /* Get memory bank mapping mode. */
+ tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
+ dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
+ dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
+
+ tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING);
+ dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
+ dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
+
+ /* Validate settings in case only one DIMM installed. */
+ if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12))
+ dimm00_addr_map = 0;
+ if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12))
+ dimm01_addr_map = 0;
+ if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12))
+ dimm10_addr_map = 0;
+ if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12))
+ dimm11_addr_map = 0;
+
+ /* If DIMM Addr map is 8GB, ROW size should be 2KB. Otherwise 1KB. */
+ /* If ROW size(DIMM1) != ROW size(DIMM0), ROW size should be the larger one. */
+ if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11))
+ adev->gfx.config.mem_row_size_in_kb = 2;
+ else
+ adev->gfx.config.mem_row_size_in_kb = 1;
+ } else {
+ tmp = (mc_arb_ramcfg & MC_ARB_RAMCFG__NOOFCOLS_MASK) >> MC_ARB_RAMCFG__NOOFCOLS__SHIFT;
+ adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
+ if (adev->gfx.config.mem_row_size_in_kb > 4)
+ adev->gfx.config.mem_row_size_in_kb = 4;
+ }
+ /* XXX use MC settings?
*/ + adev->gfx.config.shader_engine_tile_size = 32; + adev->gfx.config.num_gpus = 1; + adev->gfx.config.multi_gpu_tile_size = 64; + + /* fix up row size */ + gb_addr_config &= ~GB_ADDR_CONFIG__ROW_SIZE_MASK; + switch (adev->gfx.config.mem_row_size_in_kb) { + case 1: + default: + gb_addr_config |= (0 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT); + break; + case 2: + gb_addr_config |= (1 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT); + break; + case 4: + gb_addr_config |= (2 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT); + break; + } + adev->gfx.config.gb_addr_config = gb_addr_config; + + WREG32(mmGB_ADDR_CONFIG, gb_addr_config); + WREG32(mmHDP_ADDR_CONFIG, gb_addr_config); + WREG32(mmDMIF_ADDR_CALC, gb_addr_config); + WREG32(mmSDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET, gb_addr_config & 0x70); + WREG32(mmSDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET, gb_addr_config & 0x70); + WREG32(mmUVD_UDEC_ADDR_CONFIG, gb_addr_config); + WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, gb_addr_config); + WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config); + + gfx_v7_0_tiling_mode_table_init(adev); + + gfx_v7_0_setup_rb(adev, adev->gfx.config.max_shader_engines, + adev->gfx.config.max_sh_per_se, + adev->gfx.config.max_backends_per_se); + + /* set HW defaults for 3D engine */ + WREG32(mmCP_MEQ_THRESHOLDS, + (0x30 << CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT) | + (0x60 << CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT)); + + mutex_lock(&adev->grbm_idx_mutex); + /* + * making sure that the following register writes will be broadcasted + * to all the shaders + */ + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + + /* XXX SH_MEM regs */ + /* where to put LDS, scratch, GPUVM in FSA64 space */ + mutex_lock(&adev->srbm_mutex); + for (i = 0; i < 16; i++) { + cik_srbm_select(adev, 0, 0, 0, i); + /* CP and shaders */ + WREG32(mmSH_MEM_CONFIG, 0); + WREG32(mmSH_MEM_APE1_BASE, 1); + WREG32(mmSH_MEM_APE1_LIMIT, 0); + WREG32(mmSH_MEM_BASES, 0); + } + cik_srbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + + WREG32(mmSX_DEBUG_1, 0x20); + + WREG32(mmTA_CNTL_AUX, 0x00010000); + + tmp = RREG32(mmSPI_CONFIG_CNTL); + tmp |= 0x03000000; + WREG32(mmSPI_CONFIG_CNTL, tmp); + + WREG32(mmSQ_CONFIG, 1); + + WREG32(mmDB_DEBUG, 0); + + tmp = RREG32(mmDB_DEBUG2) & ~0xf00fffff; + tmp |= 0x00000400; + WREG32(mmDB_DEBUG2, tmp); + + tmp = RREG32(mmDB_DEBUG3) & ~0x0002021c; + tmp |= 0x00020200; + WREG32(mmDB_DEBUG3, tmp); + + tmp = RREG32(mmCB_HW_CONTROL) & ~0x00010000; + tmp |= 0x00018208; + WREG32(mmCB_HW_CONTROL, tmp); + + WREG32(mmSPI_CONFIG_CNTL_1, (4 << SPI_CONFIG_CNTL_1__VTX_DONE_DELAY__SHIFT)); + + WREG32(mmPA_SC_FIFO_SIZE, + ((adev->gfx.config.sc_prim_fifo_size_frontend << PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) | + (adev->gfx.config.sc_prim_fifo_size_backend << PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) | + (adev->gfx.config.sc_hiz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) | + (adev->gfx.config.sc_earlyz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT))); + + WREG32(mmVGT_NUM_INSTANCES, 1); + + WREG32(mmCP_PERFMON_CNTL, 0); + + WREG32(mmSQ_CONFIG, 0); + + WREG32(mmPA_SC_FORCE_EOV_MAX_CNTS, + ((4095 << PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_CLK_CNT__SHIFT) | + (255 << PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_REZ_CNT__SHIFT))); + + WREG32(mmVGT_CACHE_INVALIDATION, + (VC_AND_TC << VGT_CACHE_INVALIDATION__CACHE_INVALIDATION__SHIFT) | + (ES_AND_GS_AUTO << VGT_CACHE_INVALIDATION__AUTO_INVLD_EN__SHIFT)); + + WREG32(mmVGT_GS_VERTEX_REUSE, 16); + WREG32(mmPA_SC_LINE_STIPPLE_STATE, 0); + + WREG32(mmPA_CL_ENHANCE, 
PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA_MASK |
+ (3 << PA_CL_ENHANCE__NUM_CLIP_SEQ__SHIFT));
+ WREG32(mmPA_SC_ENHANCE, PA_SC_ENHANCE__ENABLE_PA_SC_OUT_OF_ORDER_MASK);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ udelay(50);
+}
+
+/*
+ * GPU scratch registers helpers function.
+ */
+/**
+ * gfx_v7_0_scratch_init - setup driver info for CP scratch regs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set up the number and offset of the CP scratch registers.
+ * NOTE: use of CP scratch registers is a legacy interface and
+ * is not used by default on newer asics (r6xx+). On newer asics,
+ * memory buffers are used for fences rather than scratch regs.
+ */
+static void gfx_v7_0_scratch_init(struct amdgpu_device *adev)
+{
+ int i;
+
+ adev->gfx.scratch.num_reg = 7;
+ adev->gfx.scratch.reg_base = mmSCRATCH_REG0;
+ for (i = 0; i < adev->gfx.scratch.num_reg; i++) {
+ adev->gfx.scratch.free[i] = true;
+ adev->gfx.scratch.reg[i] = adev->gfx.scratch.reg_base + i;
+ }
+}
+
+/**
+ * gfx_v7_0_ring_test_ring - basic gfx ring test
+ *
+ * @adev: amdgpu_device pointer
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Allocate a scratch register and write to it using the gfx ring (CIK).
+ * Provides a basic gfx ring test to verify that the ring is working.
+ * Used by gfx_v7_0_cp_gfx_resume().
+ * Returns 0 on success, error on failure.
+ */
+static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t scratch;
+ uint32_t tmp = 0;
+ unsigned i;
+ int r;
+
+ r = amdgpu_gfx_scratch_get(adev, &scratch);
+ if (r) {
+ DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
+ return r;
+ }
+ WREG32(scratch, 0xCAFEDEAD);
+ r = amdgpu_ring_lock(ring, 3);
+ if (r) {
+ DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r);
+ amdgpu_gfx_scratch_free(adev, scratch);
+ return r;
+ }
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
+ amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
+ amdgpu_ring_write(ring, 0xDEADBEEF);
+ amdgpu_ring_unlock_commit(ring);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = RREG32(scratch);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+ if (i < adev->usec_timeout) {
+ DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+ } else {
+ DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
+ ring->idx, scratch, tmp);
+ r = -EINVAL;
+ }
+ amdgpu_gfx_scratch_free(adev, scratch);
+ return r;
+}
+
+/**
+ * gfx_v7_0_hdp_flush_cp_ring_emit - emit an hdp flush on the cp
+ *
+ * @adev: amdgpu_device pointer
+ * @ridx: amdgpu ring index
+ *
+ * Emits an hdp flush on the cp.
+ */
+static void gfx_v7_0_hdp_flush_cp_ring_emit(struct amdgpu_ring *ring)
+{
+ u32 ref_and_mask;
+
+ if (ring->type == AMDGPU_RING_TYPE_COMPUTE) {
+ switch (ring->me) {
+ case 1:
+ ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
+ break;
+ case 2:
+ ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
+ break;
+ default:
+ return;
+ }
+ } else {
+ ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
+ }
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+ amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
+ WAIT_REG_MEM_FUNCTION(3) | /* == */
+ WAIT_REG_MEM_ENGINE(1))); /* pfp */
+ amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ);
+ amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE);
+ amdgpu_ring_write(ring, ref_and_mask);
+ amdgpu_ring_write(ring, ref_and_mask);
+ amdgpu_ring_write(ring, 0x20); /* poll interval */
+}
+
+/**
+ * gfx_v7_0_ring_emit_fence_gfx - emit a fence on the gfx ring
+ *
+ * @adev: amdgpu_device pointer
+ * @fence: amdgpu fence object
+ *
+ * Emits a fence sequence number on the gfx ring and flushes
+ * GPU caches.
+ */
+static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
+ u64 seq, bool write64bit)
+{
+ /* Workaround for cache flush problems. First send a dummy EOP
+ * event down the pipe with seq one below.
+ */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+ amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
+ EOP_TC_ACTION_EN |
+ EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
+ EVENT_INDEX(5)));
+ amdgpu_ring_write(ring, addr & 0xfffffffc);
+ amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
+ DATA_SEL(1) | INT_SEL(0));
+ amdgpu_ring_write(ring, lower_32_bits(seq - 1));
+ amdgpu_ring_write(ring, upper_32_bits(seq - 1));
+
+ /* Then send the real EOP event down the pipe. */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+ amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
+ EOP_TC_ACTION_EN |
+ EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
+ EVENT_INDEX(5)));
+ amdgpu_ring_write(ring, addr & 0xfffffffc);
+ amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
+ DATA_SEL(write64bit ? 2 : 1) | INT_SEL(2));
+ amdgpu_ring_write(ring, lower_32_bits(seq));
+ amdgpu_ring_write(ring, upper_32_bits(seq));
+}
+
+/**
+ * gfx_v7_0_ring_emit_fence_compute - emit a fence on the compute ring
+ *
+ * @adev: amdgpu_device pointer
+ * @fence: amdgpu fence object
+ *
+ * Emits a fence sequence number on the compute ring and flushes
+ * GPU caches.
+ */
+static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
+ u64 addr, u64 seq,
+ bool write64bits)
+{
+ /* RELEASE_MEM - flush caches, send int */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
+ amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
+ EOP_TC_ACTION_EN |
+ EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
+ EVENT_INDEX(5)));
+ amdgpu_ring_write(ring, DATA_SEL(write64bits ? 2 : 1) | INT_SEL(2));
+ amdgpu_ring_write(ring, addr & 0xfffffffc);
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+ amdgpu_ring_write(ring, lower_32_bits(seq));
+ amdgpu_ring_write(ring, upper_32_bits(seq));
+}
+
+/**
+ * gfx_v7_0_ring_emit_semaphore - emit a semaphore on the CP ring
+ *
+ * @ring: amdgpu ring buffer object
+ * @semaphore: amdgpu semaphore object
+ * @emit_wait: Is this a semaphore wait?
+ *
+ * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
+ * from running ahead of semaphore waits.
+ */
+static bool gfx_v7_0_ring_emit_semaphore(struct amdgpu_ring *ring,
+ struct amdgpu_semaphore *semaphore,
+ bool emit_wait)
+{
+ uint64_t addr = semaphore->gpu_addr;
+ unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
+ amdgpu_ring_write(ring, addr & 0xffffffff);
+ amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
+
+ if (emit_wait && (ring->type == AMDGPU_RING_TYPE_GFX)) {
+ /* Prevent the PFP from running ahead of the semaphore wait */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+ amdgpu_ring_write(ring, 0x0);
+ }
+
+ return true;
+}
+
+/*
+ * IB stuff
+ */
+/**
+ * gfx_v7_0_ring_emit_ib - emit an IB (Indirect Buffer) on the ring
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @ib: amdgpu indirect buffer object
+ *
+ * Emits a DE (drawing engine) or CE (constant engine) IB
+ * on the gfx ring. IBs are usually generated by userspace
+ * acceleration drivers and submitted to the kernel for
+ * scheduling on the ring. This function schedules the IB
+ * on the gfx ring for execution by the GPU.
+ */
+static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib)
+{
+ u32 header, control = 0;
+ u32 next_rptr = ring->wptr + 5;
+ if (ring->type == AMDGPU_RING_TYPE_COMPUTE)
+ control |= INDIRECT_BUFFER_VALID;
+
+ if (ib->flush_hdp_writefifo)
+ next_rptr += 7;
+
+ if (ring->need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX)
+ next_rptr += 2;
+
+ next_rptr += 4;
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
+ amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
+ amdgpu_ring_write(ring, next_rptr);
+
+ if (ib->flush_hdp_writefifo)
+ gfx_v7_0_hdp_flush_cp_ring_emit(ring);
+
+ /* insert SWITCH_BUFFER packet before first IB in the ring frame */
+ if (ring->need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) {
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+ amdgpu_ring_write(ring, 0);
+ ring->need_ctx_switch = false;
+ }
+
+ if (ib->is_const_ib)
+ header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
+ else
+ header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
+
+ control |= ib->length_dw |
+ (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
+
+ amdgpu_ring_write(ring, header);
+ amdgpu_ring_write(ring,
+#ifdef __BIG_ENDIAN
+ (2 << 0) |
+#endif
+ (ib->gpu_addr & 0xFFFFFFFC));
+ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
+ amdgpu_ring_write(ring, control);
+}
+
+/**
+ * gfx_v7_0_ring_test_ib - basic ring IB test
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Allocate an IB and execute it on the gfx ring (CIK).
+ * Provides a basic gfx ring test to verify that IBs are working.
+ * Returns 0 on success, error on failure.
+ */
+static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_ib ib;
+ uint32_t scratch;
+ uint32_t tmp = 0;
+ unsigned i;
+ int r;
+
+ r = amdgpu_gfx_scratch_get(adev, &scratch);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get scratch reg (%d).\n", r);
+ return r;
+ }
+ WREG32(scratch, 0xCAFEDEAD);
+ r = amdgpu_ib_get(ring, NULL, 256, &ib);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
+ amdgpu_gfx_scratch_free(adev, scratch);
+ return r;
+ }
+ ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
+ ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
+ ib.ptr[2] = 0xDEADBEEF;
+ ib.length_dw = 3;
+ r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
+ if (r) {
+ amdgpu_gfx_scratch_free(adev, scratch);
+ amdgpu_ib_free(adev, &ib);
+ DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
+ return r;
+ }
+ r = amdgpu_fence_wait(ib.fence, false);
+ if (r) {
+ DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
+ amdgpu_gfx_scratch_free(adev, scratch);
+ amdgpu_ib_free(adev, &ib);
+ return r;
+ }
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = RREG32(scratch);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+ if (i < adev->usec_timeout) {
+ DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
+ ib.fence->ring->idx, i);
+ } else {
+ DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
+ scratch, tmp);
+ r = -EINVAL;
+ }
+ amdgpu_gfx_scratch_free(adev, scratch);
+ amdgpu_ib_free(adev, &ib);
+ return r;
+}
+
+/*
+ * CP.
+ * On CIK, gfx and compute now have independent command processors.
+ *
+ * GFX
+ * Gfx consists of a single ring and can process both gfx jobs and
+ * compute jobs. The gfx CP consists of three microengines (ME):
+ * PFP - Pre-Fetch Parser
+ * ME - Micro Engine
+ * CE - Constant Engine
+ * The PFP and ME make up what is considered the Drawing Engine (DE).
+ * The CE is an asynchronous engine used for updating buffer descriptors
+ * used by the DE so that they can be loaded into cache in parallel
+ * while the DE is processing state update packets.
+ *
+ * Compute
+ * The compute CP consists of two microengines (ME):
+ * MEC1 - Compute MicroEngine 1
+ * MEC2 - Compute MicroEngine 2
+ * Each MEC supports 4 compute pipes and each pipe supports 8 queues.
+ * The queues are exposed to userspace and are programmed directly
+ * by the compute runtime.
+ */
+/**
+ * gfx_v7_0_cp_gfx_enable - enable/disable the gfx CP MEs
+ *
+ * @adev: amdgpu_device pointer
+ * @enable: enable or disable the MEs
+ *
+ * Halts or unhalts the gfx MEs.
+ */
+static void gfx_v7_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
+{
+ int i;
+
+ if (enable) {
+ WREG32(mmCP_ME_CNTL, 0);
+ } else {
+ WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK));
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++)
+ adev->gfx.gfx_ring[i].ready = false;
+ }
+ udelay(50);
+}
+
+/**
+ * gfx_v7_0_cp_gfx_load_microcode - load the gfx CP ME ucode
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Loads the gfx PFP, ME, and CE ucode.
+ * Returns 0 for success, -EINVAL if the ucode is not available.
+ */ +static int gfx_v7_0_cp_gfx_load_microcode(struct amdgpu_device *adev) +{ + const struct gfx_firmware_header_v1_0 *pfp_hdr; + const struct gfx_firmware_header_v1_0 *ce_hdr; + const struct gfx_firmware_header_v1_0 *me_hdr; + const __le32 *fw_data; + unsigned i, fw_size; + + if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw) + return -EINVAL; + + pfp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data; + ce_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data; + me_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data; + + amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header); + amdgpu_ucode_print_gfx_hdr(&ce_hdr->header); + amdgpu_ucode_print_gfx_hdr(&me_hdr->header); + adev->gfx.pfp_fw_version = le32_to_cpu(pfp_hdr->header.ucode_version); + adev->gfx.ce_fw_version = le32_to_cpu(ce_hdr->header.ucode_version); + adev->gfx.me_fw_version = le32_to_cpu(me_hdr->header.ucode_version); + + gfx_v7_0_cp_gfx_enable(adev, false); + + /* PFP */ + fw_data = (const __le32 *) + (adev->gfx.pfp_fw->data + + le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes)); + fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4; + WREG32(mmCP_PFP_UCODE_ADDR, 0); + for (i = 0; i < fw_size; i++) + WREG32(mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++)); + WREG32(mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version); + + /* CE */ + fw_data = (const __le32 *) + (adev->gfx.ce_fw->data + + le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes)); + fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4; + WREG32(mmCP_CE_UCODE_ADDR, 0); + for (i = 0; i < fw_size; i++) + WREG32(mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++)); + WREG32(mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version); + + /* ME */ + fw_data = (const __le32 *) + (adev->gfx.me_fw->data + + le32_to_cpu(me_hdr->header.ucode_array_offset_bytes)); + fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4; + WREG32(mmCP_ME_RAM_WADDR, 0); + for (i = 0; i < fw_size; i++) + WREG32(mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++)); + WREG32(mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version); + + return 0; +} + +/** + * gfx_v7_0_cp_gfx_start - start the gfx ring + * + * @adev: amdgpu_device pointer + * + * Enables the ring and loads the clear state context and other + * packets required to init the ring. + * Returns 0 for success, error for failure. + */ +static int gfx_v7_0_cp_gfx_start(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0]; + const struct cs_section_def *sect = NULL; + const struct cs_extent_def *ext = NULL; + int r, i; + + /* init the CP */ + WREG32(mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1); + WREG32(mmCP_ENDIAN_SWAP, 0); + WREG32(mmCP_DEVICE_ID, 1); + + gfx_v7_0_cp_gfx_enable(adev, true); + + r = amdgpu_ring_lock(ring, gfx_v7_0_get_csb_size(adev) + 8); + if (r) { + DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); + return r; + } + + /* init the CE partitions. 
CE only used for gfx on CIK */ + amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2)); + amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE)); + amdgpu_ring_write(ring, 0x8000); + amdgpu_ring_write(ring, 0x8000); + + /* clear state buffer */ + amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); + amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE); + + amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1)); + amdgpu_ring_write(ring, 0x80000000); + amdgpu_ring_write(ring, 0x80000000); + + for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) { + for (ext = sect->section; ext->extent != NULL; ++ext) { + if (sect->id == SECT_CONTEXT) { + amdgpu_ring_write(ring, + PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count)); + amdgpu_ring_write(ring, ext->reg_index - PACKET3_SET_CONTEXT_REG_START); + for (i = 0; i < ext->reg_count; i++) + amdgpu_ring_write(ring, ext->extent[i]); + } + } + } + + amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); + amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START); + switch (adev->asic_type) { + case CHIP_BONAIRE: + amdgpu_ring_write(ring, 0x16000012); + amdgpu_ring_write(ring, 0x00000000); + break; + case CHIP_KAVERI: + amdgpu_ring_write(ring, 0x00000000); /* XXX */ + amdgpu_ring_write(ring, 0x00000000); + break; + case CHIP_KABINI: + case CHIP_MULLINS: + amdgpu_ring_write(ring, 0x00000000); /* XXX */ + amdgpu_ring_write(ring, 0x00000000); + break; + case CHIP_HAWAII: + amdgpu_ring_write(ring, 0x3a00161a); + amdgpu_ring_write(ring, 0x0000002e); + break; + default: + amdgpu_ring_write(ring, 0x00000000); + amdgpu_ring_write(ring, 0x00000000); + break; + } + + amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); + amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE); + + amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0)); + amdgpu_ring_write(ring, 0); + + amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); + amdgpu_ring_write(ring, 0x00000316); + amdgpu_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ + amdgpu_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */ + + amdgpu_ring_unlock_commit(ring); + + return 0; +} + +/** + * gfx_v7_0_cp_gfx_resume - setup the gfx ring buffer registers + * + * @adev: amdgpu_device pointer + * + * Program the location and size of the gfx ring buffer + * and test it to make sure it's working. + * Returns 0 for success, error for failure. 
+ */
+static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ u32 tmp;
+ u32 rb_bufsz;
+ u64 rb_addr, rptr_addr;
+ int r;
+
+ WREG32(mmCP_SEM_WAIT_TIMER, 0x0);
+ if (adev->asic_type != CHIP_HAWAII)
+ WREG32(mmCP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
+
+ /* Set the write pointer delay */
+ WREG32(mmCP_RB_WPTR_DELAY, 0);
+
+ /* set the RB to use vmid 0 */
+ WREG32(mmCP_RB_VMID, 0);
+
+ WREG32(mmSCRATCH_ADDR, 0);
+
+ /* ring 0 - compute and gfx */
+ /* Set ring buffer size */
+ ring = &adev->gfx.gfx_ring[0];
+ rb_bufsz = order_base_2(ring->ring_size / 8);
+ tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+ tmp |= BUF_SWAP_32BIT;
+#endif
+ WREG32(mmCP_RB0_CNTL, tmp);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(mmCP_RB0_CNTL, tmp | CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK);
+ ring->wptr = 0;
+ WREG32(mmCP_RB0_WPTR, ring->wptr);
+
+ /* set the wb address whether it's enabled or not */
+ rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
+ WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
+ WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
+
+ /* scratch register shadowing is no longer supported */
+ WREG32(mmSCRATCH_UMSK, 0);
+
+ mdelay(1);
+ WREG32(mmCP_RB0_CNTL, tmp);
+
+ rb_addr = ring->gpu_addr >> 8;
+ WREG32(mmCP_RB0_BASE, rb_addr);
+ WREG32(mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
+
+ /* start the ring */
+ gfx_v7_0_cp_gfx_start(adev);
+ ring->ready = true;
+ r = amdgpu_ring_test_ring(ring);
+ if (r) {
+ ring->ready = false;
+ return r;
+ }
+
+ return 0;
+}
+
+static u32 gfx_v7_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
+{
+ u32 rptr;
+
+ rptr = ring->adev->wb.wb[ring->rptr_offs];
+
+ return rptr;
+}
+
+static u32 gfx_v7_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u32 wptr;
+
+ wptr = RREG32(mmCP_RB0_WPTR);
+
+ return wptr;
+}
+
+static void gfx_v7_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ WREG32(mmCP_RB0_WPTR, ring->wptr);
+ (void)RREG32(mmCP_RB0_WPTR);
+}
+
+static u32 gfx_v7_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
+{
+ u32 rptr;
+
+ rptr = ring->adev->wb.wb[ring->rptr_offs];
+
+ return rptr;
+}
+
+static u32 gfx_v7_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
+{
+ u32 wptr;
+
+ /* XXX check if swapping is necessary on BE */
+ wptr = ring->adev->wb.wb[ring->wptr_offs];
+
+ return wptr;
+}
+
+static void gfx_v7_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ /* XXX check if swapping is necessary on BE */
+ adev->wb.wb[ring->wptr_offs] = ring->wptr;
+ WDOORBELL32(ring->doorbell_index, ring->wptr);
+}
+
+/**
+ * gfx_v7_0_cp_compute_enable - enable/disable the compute CP MEs
+ *
+ * @adev: amdgpu_device pointer
+ * @enable: enable or disable the MEs
+ *
+ * Halts or unhalts the compute MEs.
+ */
+static void gfx_v7_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
+{
+ int i;
+
+ if (enable) {
+ WREG32(mmCP_MEC_CNTL, 0);
+ } else {
+ WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
+ for (i = 0; i < adev->gfx.num_compute_rings; i++)
+ adev->gfx.compute_ring[i].ready = false;
+ }
+ udelay(50);
+}
+
+/**
+ * gfx_v7_0_cp_compute_load_microcode - load the compute CP ME ucode
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Loads the compute MEC1&2 ucode.
+ * Returns 0 for success, -EINVAL if the ucode is not available.
+ */ +static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev) +{ + const struct gfx_firmware_header_v1_0 *mec_hdr; + const __le32 *fw_data; + unsigned i, fw_size; + + if (!adev->gfx.mec_fw) + return -EINVAL; + + mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; + amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); + adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version); + + gfx_v7_0_cp_compute_enable(adev, false); + + /* MEC1 */ + fw_data = (const __le32 *) + (adev->gfx.mec_fw->data + + le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes)); + fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4; + WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0); + for (i = 0; i < fw_size; i++) + WREG32(mmCP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data++)); + WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0); + + if (adev->asic_type == CHIP_KAVERI) { + const struct gfx_firmware_header_v1_0 *mec2_hdr; + + if (!adev->gfx.mec2_fw) + return -EINVAL; + + mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data; + amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header); + adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version); + + /* MEC2 */ + fw_data = (const __le32 *) + (adev->gfx.mec2_fw->data + + le32_to_cpu(mec2_hdr->header.ucode_array_offset_bytes)); + fw_size = le32_to_cpu(mec2_hdr->header.ucode_size_bytes) / 4; + WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0); + for (i = 0; i < fw_size; i++) + WREG32(mmCP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data++)); + WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0); + } + + return 0; +} + +/** + * gfx_v7_0_cp_compute_start - start the compute queues + * + * @adev: amdgpu_device pointer + * + * Enable the compute queues. + * Returns 0 for success, error for failure. + */ +static int gfx_v7_0_cp_compute_start(struct amdgpu_device *adev) +{ + gfx_v7_0_cp_compute_enable(adev, true); + + return 0; +} + +/** + * gfx_v7_0_cp_compute_fini - stop the compute queues + * + * @adev: amdgpu_device pointer + * + * Stop the compute queues and tear down the driver queue + * info. 
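+ *
+ * This only releases the per-ring MQD buffer objects; the rings
+ * themselves are freed later in gfx_v7_0_sw_fini().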
+ */ +static void gfx_v7_0_cp_compute_fini(struct amdgpu_device *adev) +{ + int i, r; + + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + struct amdgpu_ring *ring = &adev->gfx.compute_ring[i]; + + if (ring->mqd_obj) { + r = amdgpu_bo_reserve(ring->mqd_obj, false); + if (unlikely(r != 0)) + dev_warn(adev->dev, "(%d) reserve MQD bo failed\n", r); + + amdgpu_bo_unpin(ring->mqd_obj); + amdgpu_bo_unreserve(ring->mqd_obj); + + amdgpu_bo_unref(&ring->mqd_obj); + ring->mqd_obj = NULL; + } + } +} + +static void gfx_v7_0_mec_fini(struct amdgpu_device *adev) +{ + int r; + + if (adev->gfx.mec.hpd_eop_obj) { + r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false); + if (unlikely(r != 0)) + dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r); + amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj); + amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj); + + amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj); + adev->gfx.mec.hpd_eop_obj = NULL; + } +} + +#define MEC_HPD_SIZE 2048 + +static int gfx_v7_0_mec_init(struct amdgpu_device *adev) +{ + int r; + u32 *hpd; + + /* + * KV: 2 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 64 Queues total + * CI/KB: 1 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 32 Queues total + * Nonetheless, we assign only 1 pipe because all other pipes will + * be handled by KFD + */ + adev->gfx.mec.num_mec = 1; + adev->gfx.mec.num_pipe = 1; + adev->gfx.mec.num_queue = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe * 8; + + if (adev->gfx.mec.hpd_eop_obj == NULL) { + r = amdgpu_bo_create(adev, + adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2, + PAGE_SIZE, true, + AMDGPU_GEM_DOMAIN_GTT, 0, NULL, + &adev->gfx.mec.hpd_eop_obj); + if (r) { + dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); + return r; + } + } + + r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false); + if (unlikely(r != 0)) { + gfx_v7_0_mec_fini(adev); + return r; + } + r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT, + &adev->gfx.mec.hpd_eop_gpu_addr); + if (r) { + dev_warn(adev->dev, "(%d) pin HDP EOP bo failed\n", r); + gfx_v7_0_mec_fini(adev); + return r; + } + r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd); + if (r) { + dev_warn(adev->dev, "(%d) map HDP EOP bo failed\n", r); + gfx_v7_0_mec_fini(adev); + return r; + } + + /* clear memory. 
Not sure if this is required or not */ + memset(hpd, 0, adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2); + + amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj); + amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj); + + return 0; +} + +struct hqd_registers +{ + u32 cp_mqd_base_addr; + u32 cp_mqd_base_addr_hi; + u32 cp_hqd_active; + u32 cp_hqd_vmid; + u32 cp_hqd_persistent_state; + u32 cp_hqd_pipe_priority; + u32 cp_hqd_queue_priority; + u32 cp_hqd_quantum; + u32 cp_hqd_pq_base; + u32 cp_hqd_pq_base_hi; + u32 cp_hqd_pq_rptr; + u32 cp_hqd_pq_rptr_report_addr; + u32 cp_hqd_pq_rptr_report_addr_hi; + u32 cp_hqd_pq_wptr_poll_addr; + u32 cp_hqd_pq_wptr_poll_addr_hi; + u32 cp_hqd_pq_doorbell_control; + u32 cp_hqd_pq_wptr; + u32 cp_hqd_pq_control; + u32 cp_hqd_ib_base_addr; + u32 cp_hqd_ib_base_addr_hi; + u32 cp_hqd_ib_rptr; + u32 cp_hqd_ib_control; + u32 cp_hqd_iq_timer; + u32 cp_hqd_iq_rptr; + u32 cp_hqd_dequeue_request; + u32 cp_hqd_dma_offload; + u32 cp_hqd_sema_cmd; + u32 cp_hqd_msg_type; + u32 cp_hqd_atomic0_preop_lo; + u32 cp_hqd_atomic0_preop_hi; + u32 cp_hqd_atomic1_preop_lo; + u32 cp_hqd_atomic1_preop_hi; + u32 cp_hqd_hq_scheduler0; + u32 cp_hqd_hq_scheduler1; + u32 cp_mqd_control; +}; + +struct bonaire_mqd +{ + u32 header; + u32 dispatch_initiator; + u32 dimensions[3]; + u32 start_idx[3]; + u32 num_threads[3]; + u32 pipeline_stat_enable; + u32 perf_counter_enable; + u32 pgm[2]; + u32 tba[2]; + u32 tma[2]; + u32 pgm_rsrc[2]; + u32 vmid; + u32 resource_limits; + u32 static_thread_mgmt01[2]; + u32 tmp_ring_size; + u32 static_thread_mgmt23[2]; + u32 restart[3]; + u32 thread_trace_enable; + u32 reserved1; + u32 user_data[16]; + u32 vgtcs_invoke_count[2]; + struct hqd_registers queue_state; + u32 dequeue_cntr; + u32 interrupt_queue[64]; +}; + +/** + * gfx_v7_0_cp_compute_resume - setup the compute queue registers + * + * @adev: amdgpu_device pointer + * + * Program the compute queues and test them to make sure they + * are working. + * Returns 0 for success, error for failure. + */ +static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev) +{ + int r, i, j; + u32 tmp; + bool use_doorbell = true; + u64 hqd_gpu_addr; + u64 mqd_gpu_addr; + u64 eop_gpu_addr; + u64 wb_gpu_addr; + u32 *buf; + struct bonaire_mqd *mqd; + + r = gfx_v7_0_cp_compute_start(adev); + if (r) + return r; + + /* fix up chicken bits */ + tmp = RREG32(mmCP_CPF_DEBUG); + tmp |= (1 << 23); + WREG32(mmCP_CPF_DEBUG, tmp); + + /* init the pipes */ + mutex_lock(&adev->srbm_mutex); + for (i = 0; i < (adev->gfx.mec.num_pipe * adev->gfx.mec.num_mec); i++) { + int me = (i < 4) ? 1 : 2; + int pipe = (i < 4) ? i : (i - 4); + + eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (i * MEC_HPD_SIZE * 2); + + cik_srbm_select(adev, me, pipe, 0, 0); + + /* write the EOP addr */ + WREG32(mmCP_HPD_EOP_BASE_ADDR, eop_gpu_addr >> 8); + WREG32(mmCP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr) >> 8); + + /* set the VMID assigned */ + WREG32(mmCP_HPD_EOP_VMID, 0); + + /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ + tmp = RREG32(mmCP_HPD_EOP_CONTROL); + tmp &= ~CP_HPD_EOP_CONTROL__EOP_SIZE_MASK; + tmp |= order_base_2(MEC_HPD_SIZE / 8); + WREG32(mmCP_HPD_EOP_CONTROL, tmp); + } + cik_srbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + + /* init the queues. Just two for now. 
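+ * Each ring gets a small MQD (memory queue descriptor) buffer object
+ * below; the CP reads the HQD register state for the queue from it via
+ * CP_MQD_BASE_ADDR.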
*/ + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + struct amdgpu_ring *ring = &adev->gfx.compute_ring[i]; + + if (ring->mqd_obj == NULL) { + r = amdgpu_bo_create(adev, + sizeof(struct bonaire_mqd), + PAGE_SIZE, true, + AMDGPU_GEM_DOMAIN_GTT, 0, NULL, + &ring->mqd_obj); + if (r) { + dev_warn(adev->dev, "(%d) create MQD bo failed\n", r); + return r; + } + } + + r = amdgpu_bo_reserve(ring->mqd_obj, false); + if (unlikely(r != 0)) { + gfx_v7_0_cp_compute_fini(adev); + return r; + } + r = amdgpu_bo_pin(ring->mqd_obj, AMDGPU_GEM_DOMAIN_GTT, + &mqd_gpu_addr); + if (r) { + dev_warn(adev->dev, "(%d) pin MQD bo failed\n", r); + gfx_v7_0_cp_compute_fini(adev); + return r; + } + r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&buf); + if (r) { + dev_warn(adev->dev, "(%d) map MQD bo failed\n", r); + gfx_v7_0_cp_compute_fini(adev); + return r; + } + + /* init the mqd struct */ + memset(buf, 0, sizeof(struct bonaire_mqd)); + + mqd = (struct bonaire_mqd *)buf; + mqd->header = 0xC0310800; + mqd->static_thread_mgmt01[0] = 0xffffffff; + mqd->static_thread_mgmt01[1] = 0xffffffff; + mqd->static_thread_mgmt23[0] = 0xffffffff; + mqd->static_thread_mgmt23[1] = 0xffffffff; + + mutex_lock(&adev->srbm_mutex); + cik_srbm_select(adev, ring->me, + ring->pipe, + ring->queue, 0); + + /* disable wptr polling */ + tmp = RREG32(mmCP_PQ_WPTR_POLL_CNTL); + tmp &= ~CP_PQ_WPTR_POLL_CNTL__EN_MASK; + WREG32(mmCP_PQ_WPTR_POLL_CNTL, tmp); + + /* enable doorbell? */ + mqd->queue_state.cp_hqd_pq_doorbell_control = + RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL); + if (use_doorbell) + mqd->queue_state.cp_hqd_pq_doorbell_control |= CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK; + else + mqd->queue_state.cp_hqd_pq_doorbell_control &= ~CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK; + WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, + mqd->queue_state.cp_hqd_pq_doorbell_control); + + /* disable the queue if it's active */ + mqd->queue_state.cp_hqd_dequeue_request = 0; + mqd->queue_state.cp_hqd_pq_rptr = 0; + mqd->queue_state.cp_hqd_pq_wptr= 0; + if (RREG32(mmCP_HQD_ACTIVE) & 1) { + WREG32(mmCP_HQD_DEQUEUE_REQUEST, 1); + for (j = 0; j < adev->usec_timeout; j++) { + if (!(RREG32(mmCP_HQD_ACTIVE) & 1)) + break; + udelay(1); + } + WREG32(mmCP_HQD_DEQUEUE_REQUEST, mqd->queue_state.cp_hqd_dequeue_request); + WREG32(mmCP_HQD_PQ_RPTR, mqd->queue_state.cp_hqd_pq_rptr); + WREG32(mmCP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr); + } + + /* set the pointer to the MQD */ + mqd->queue_state.cp_mqd_base_addr = mqd_gpu_addr & 0xfffffffc; + mqd->queue_state.cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr); + WREG32(mmCP_MQD_BASE_ADDR, mqd->queue_state.cp_mqd_base_addr); + WREG32(mmCP_MQD_BASE_ADDR_HI, mqd->queue_state.cp_mqd_base_addr_hi); + /* set MQD vmid to 0 */ + mqd->queue_state.cp_mqd_control = RREG32(mmCP_MQD_CONTROL); + mqd->queue_state.cp_mqd_control &= ~CP_MQD_CONTROL__VMID_MASK; + WREG32(mmCP_MQD_CONTROL, mqd->queue_state.cp_mqd_control); + + /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */ + hqd_gpu_addr = ring->gpu_addr >> 8; + mqd->queue_state.cp_hqd_pq_base = hqd_gpu_addr; + mqd->queue_state.cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr); + WREG32(mmCP_HQD_PQ_BASE, mqd->queue_state.cp_hqd_pq_base); + WREG32(mmCP_HQD_PQ_BASE_HI, mqd->queue_state.cp_hqd_pq_base_hi); + + /* set up the HQD, this is similar to CP_RB0_CNTL */ + mqd->queue_state.cp_hqd_pq_control = RREG32(mmCP_HQD_PQ_CONTROL); + mqd->queue_state.cp_hqd_pq_control &= + ~(CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK | + CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE_MASK); + + 
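+ /* the queue size and rptr block size fields below use the same
+ * log2(n / 8) encoding as CP_RB0_CNTL in gfx_v7_0_cp_gfx_resume() */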
mqd->queue_state.cp_hqd_pq_control |= + order_base_2(ring->ring_size / 8); + mqd->queue_state.cp_hqd_pq_control |= + (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8); +#ifdef __BIG_ENDIAN + mqd->queue_state.cp_hqd_pq_control |= BUF_SWAP_32BIT; +#endif + mqd->queue_state.cp_hqd_pq_control &= + ~(CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK | + CP_HQD_PQ_CONTROL__ROQ_PQ_IB_FLIP_MASK | + CP_HQD_PQ_CONTROL__PQ_VOLATILE_MASK); + mqd->queue_state.cp_hqd_pq_control |= + CP_HQD_PQ_CONTROL__PRIV_STATE_MASK | + CP_HQD_PQ_CONTROL__KMD_QUEUE_MASK; /* assuming kernel queue control */ + WREG32(mmCP_HQD_PQ_CONTROL, mqd->queue_state.cp_hqd_pq_control); + + /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ + wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); + mqd->queue_state.cp_hqd_pq_wptr_poll_addr = wb_gpu_addr & 0xfffffffc; + mqd->queue_state.cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff; + WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR, mqd->queue_state.cp_hqd_pq_wptr_poll_addr); + WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR_HI, + mqd->queue_state.cp_hqd_pq_wptr_poll_addr_hi); + + /* set the wb address wether it's enabled or not */ + wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4); + mqd->queue_state.cp_hqd_pq_rptr_report_addr = wb_gpu_addr & 0xfffffffc; + mqd->queue_state.cp_hqd_pq_rptr_report_addr_hi = + upper_32_bits(wb_gpu_addr) & 0xffff; + WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR, + mqd->queue_state.cp_hqd_pq_rptr_report_addr); + WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI, + mqd->queue_state.cp_hqd_pq_rptr_report_addr_hi); + + /* enable the doorbell if requested */ + if (use_doorbell) { + mqd->queue_state.cp_hqd_pq_doorbell_control = + RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL); + mqd->queue_state.cp_hqd_pq_doorbell_control &= + ~CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET_MASK; + mqd->queue_state.cp_hqd_pq_doorbell_control |= + (ring->doorbell_index << + CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT); + mqd->queue_state.cp_hqd_pq_doorbell_control |= + CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK; + mqd->queue_state.cp_hqd_pq_doorbell_control &= + ~(CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SOURCE_MASK | + CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_HIT_MASK); + + } else { + mqd->queue_state.cp_hqd_pq_doorbell_control = 0; + } + WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, + mqd->queue_state.cp_hqd_pq_doorbell_control); + + /* read and write pointers, similar to CP_RB0_WPTR/_RPTR */ + ring->wptr = 0; + mqd->queue_state.cp_hqd_pq_wptr = ring->wptr; + WREG32(mmCP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr); + mqd->queue_state.cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR); + + /* set the vmid for the queue */ + mqd->queue_state.cp_hqd_vmid = 0; + WREG32(mmCP_HQD_VMID, mqd->queue_state.cp_hqd_vmid); + + /* activate the queue */ + mqd->queue_state.cp_hqd_active = 1; + WREG32(mmCP_HQD_ACTIVE, mqd->queue_state.cp_hqd_active); + + cik_srbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + + amdgpu_bo_kunmap(ring->mqd_obj); + amdgpu_bo_unreserve(ring->mqd_obj); + + ring->ready = true; + r = amdgpu_ring_test_ring(ring); + if (r) + ring->ready = false; + } + + return 0; +} + +static void gfx_v7_0_cp_enable(struct amdgpu_device *adev, bool enable) +{ + gfx_v7_0_cp_gfx_enable(adev, enable); + gfx_v7_0_cp_compute_enable(adev, enable); +} + +static int gfx_v7_0_cp_load_microcode(struct amdgpu_device *adev) +{ + int r; + + r = gfx_v7_0_cp_gfx_load_microcode(adev); + if (r) + return r; + r = gfx_v7_0_cp_compute_load_microcode(adev); + if (r) + return r; + + return 0; +} + +static void 
gfx_v7_0_enable_gui_idle_interrupt(struct amdgpu_device *adev, + bool enable) +{ + u32 tmp = RREG32(mmCP_INT_CNTL_RING0); + + if (enable) + tmp |= (CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE_MASK | + CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE_MASK); + else + tmp &= ~(CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE_MASK | + CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE_MASK); + WREG32(mmCP_INT_CNTL_RING0, tmp); +} + +static int gfx_v7_0_cp_resume(struct amdgpu_device *adev) +{ + int r; + + gfx_v7_0_enable_gui_idle_interrupt(adev, false); + + r = gfx_v7_0_cp_load_microcode(adev); + if (r) + return r; + + r = gfx_v7_0_cp_gfx_resume(adev); + if (r) + return r; + r = gfx_v7_0_cp_compute_resume(adev); + if (r) + return r; + + gfx_v7_0_enable_gui_idle_interrupt(adev, true); + + return 0; +} + +static void gfx_v7_0_ce_sync_me(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + u64 gpu_addr = adev->wb.gpu_addr + adev->gfx.ce_sync_offs * 4; + + /* instruct DE to set a magic number */ + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); + amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | + WRITE_DATA_DST_SEL(5))); + amdgpu_ring_write(ring, gpu_addr & 0xfffffffc); + amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff); + amdgpu_ring_write(ring, 1); + + /* let CE wait till condition satisfied */ + amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); + amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */ + WAIT_REG_MEM_MEM_SPACE(1) | /* memory */ + WAIT_REG_MEM_FUNCTION(3) | /* == */ + WAIT_REG_MEM_ENGINE(2))); /* ce */ + amdgpu_ring_write(ring, gpu_addr & 0xfffffffc); + amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff); + amdgpu_ring_write(ring, 1); + amdgpu_ring_write(ring, 0xffffffff); + amdgpu_ring_write(ring, 4); /* poll interval */ + + /* instruct CE to reset wb of ce_sync to zero */ + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); + amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) | + WRITE_DATA_DST_SEL(5) | + WR_CONFIRM)); + amdgpu_ring_write(ring, gpu_addr & 0xfffffffc); + amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff); + amdgpu_ring_write(ring, 0); +} + +/* + * vm + * VMID 0 is the physical GPU addresses as used by the kernel. + * VMIDs 1-15 are used for userspace clients and are handled + * by the amdgpu vm/hsa code. + */ +/** + * gfx_v7_0_ring_emit_vm_flush - cik vm flush using the CP + * + * @adev: amdgpu_device pointer + * + * Update the page table base and flush the VM TLB + * using the CP (CIK). 
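+ *
+ * The page directory address is written (right-shifted by 12) into
+ * VM_CONTEXT0..15_PAGE_TABLE_BASE_ADDR for the given vm_id, and the TLB
+ * is then flushed by setting that VMID's bit in VM_INVALIDATE_REQUEST.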
+ */ +static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring, + unsigned vm_id, uint64_t pd_addr) +{ + int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); + + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); + amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | + WRITE_DATA_DST_SEL(0))); + if (vm_id < 8) { + amdgpu_ring_write(ring, + (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id)); + } else { + amdgpu_ring_write(ring, + (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8)); + } + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, pd_addr >> 12); + + /* update SH_MEM_* regs */ + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); + amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | + WRITE_DATA_DST_SEL(0))); + amdgpu_ring_write(ring, mmSRBM_GFX_CNTL); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, VMID(vm_id)); + + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 6)); + amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | + WRITE_DATA_DST_SEL(0))); + amdgpu_ring_write(ring, mmSH_MEM_BASES); + amdgpu_ring_write(ring, 0); + + amdgpu_ring_write(ring, 0); /* SH_MEM_BASES */ + amdgpu_ring_write(ring, 0); /* SH_MEM_CONFIG */ + amdgpu_ring_write(ring, 1); /* SH_MEM_APE1_BASE */ + amdgpu_ring_write(ring, 0); /* SH_MEM_APE1_LIMIT */ + + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); + amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | + WRITE_DATA_DST_SEL(0))); + amdgpu_ring_write(ring, mmSRBM_GFX_CNTL); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, VMID(0)); + + + /* bits 0-15 are the VM contexts0-15 */ + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); + amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | + WRITE_DATA_DST_SEL(0))); + amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, 1 << vm_id); + + /* wait for the invalidate to complete */ + amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); + amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */ + WAIT_REG_MEM_FUNCTION(0) | /* always */ + WAIT_REG_MEM_ENGINE(0))); /* me */ + amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, 0); /* ref */ + amdgpu_ring_write(ring, 0); /* mask */ + amdgpu_ring_write(ring, 0x20); /* poll interval */ + + /* compute doesn't have PFP */ + if (usepfp) { + /* sync PFP to ME, otherwise we might get invalid PFP reads */ + amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); + amdgpu_ring_write(ring, 0x0); + + /* synce CE with ME to prevent CE fetch CEIB before context switch done */ + gfx_v7_0_ce_sync_me(ring); + } +} + +/* + * RLC + * The RLC is a multi-purpose microengine that handles a + * variety of functions. 
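+ * In this file it is used for clock gating, power gating and the
+ * save/restore of graphics register state (see the CG/PG helpers below).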
+ */ +static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev) +{ + int r; + + /* save restore block */ + if (adev->gfx.rlc.save_restore_obj) { + r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false); + if (unlikely(r != 0)) + dev_warn(adev->dev, "(%d) reserve RLC sr bo failed\n", r); + amdgpu_bo_unpin(adev->gfx.rlc.save_restore_obj); + amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj); + + amdgpu_bo_unref(&adev->gfx.rlc.save_restore_obj); + adev->gfx.rlc.save_restore_obj = NULL; + } + + /* clear state block */ + if (adev->gfx.rlc.clear_state_obj) { + r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); + if (unlikely(r != 0)) + dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r); + amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); + amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); + + amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj); + adev->gfx.rlc.clear_state_obj = NULL; + } + + /* clear state block */ + if (adev->gfx.rlc.cp_table_obj) { + r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false); + if (unlikely(r != 0)) + dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r); + amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj); + amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); + + amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj); + adev->gfx.rlc.cp_table_obj = NULL; + } +} + +static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) +{ + const u32 *src_ptr; + volatile u32 *dst_ptr; + u32 dws, i; + const struct cs_section_def *cs_data; + int r; + + /* allocate rlc buffers */ + if (adev->flags & AMDGPU_IS_APU) { + if (adev->asic_type == CHIP_KAVERI) { + adev->gfx.rlc.reg_list = spectre_rlc_save_restore_register_list; + adev->gfx.rlc.reg_list_size = + (u32)ARRAY_SIZE(spectre_rlc_save_restore_register_list); + } else { + adev->gfx.rlc.reg_list = kalindi_rlc_save_restore_register_list; + adev->gfx.rlc.reg_list_size = + (u32)ARRAY_SIZE(kalindi_rlc_save_restore_register_list); + } + } + adev->gfx.rlc.cs_data = ci_cs_data; + adev->gfx.rlc.cp_table_size = CP_ME_TABLE_SIZE * 5 * 4; + + src_ptr = adev->gfx.rlc.reg_list; + dws = adev->gfx.rlc.reg_list_size; + dws += (5 * 16) + 48 + 48 + 64; + + cs_data = adev->gfx.rlc.cs_data; + + if (src_ptr) { + /* save restore block */ + if (adev->gfx.rlc.save_restore_obj == NULL) { + r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, + AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->gfx.rlc.save_restore_obj); + if (r) { + dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r); + return r; + } + } + + r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false); + if (unlikely(r != 0)) { + gfx_v7_0_rlc_fini(adev); + return r; + } + r = amdgpu_bo_pin(adev->gfx.rlc.save_restore_obj, AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.rlc.save_restore_gpu_addr); + if (r) { + amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj); + dev_warn(adev->dev, "(%d) pin RLC sr bo failed\n", r); + gfx_v7_0_rlc_fini(adev); + return r; + } + + r = amdgpu_bo_kmap(adev->gfx.rlc.save_restore_obj, (void **)&adev->gfx.rlc.sr_ptr); + if (r) { + dev_warn(adev->dev, "(%d) map RLC sr bo failed\n", r); + gfx_v7_0_rlc_fini(adev); + return r; + } + /* write the sr buffer */ + dst_ptr = adev->gfx.rlc.sr_ptr; + for (i = 0; i < adev->gfx.rlc.reg_list_size; i++) + dst_ptr[i] = cpu_to_le32(src_ptr[i]); + amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj); + amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj); + } + + if (cs_data) { + /* clear state block */ + adev->gfx.rlc.clear_state_size = dws = gfx_v7_0_get_csb_size(adev); + + if (adev->gfx.rlc.clear_state_obj == NULL) { + r = 
amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, + AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->gfx.rlc.clear_state_obj); + if (r) { + dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); + gfx_v7_0_rlc_fini(adev); + return r; + } + } + r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); + if (unlikely(r != 0)) { + gfx_v7_0_rlc_fini(adev); + return r; + } + r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.rlc.clear_state_gpu_addr); + if (r) { + amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); + dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r); + gfx_v7_0_rlc_fini(adev); + return r; + } + + r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr); + if (r) { + dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r); + gfx_v7_0_rlc_fini(adev); + return r; + } + /* set up the cs buffer */ + dst_ptr = adev->gfx.rlc.cs_ptr; + gfx_v7_0_get_csb_buffer(adev, dst_ptr); + amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj); + amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); + } + + if (adev->gfx.rlc.cp_table_size) { + if (adev->gfx.rlc.cp_table_obj == NULL) { + r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true, + AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->gfx.rlc.cp_table_obj); + if (r) { + dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r); + gfx_v7_0_rlc_fini(adev); + return r; + } + } + + r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false); + if (unlikely(r != 0)) { + dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r); + gfx_v7_0_rlc_fini(adev); + return r; + } + r = amdgpu_bo_pin(adev->gfx.rlc.cp_table_obj, AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.rlc.cp_table_gpu_addr); + if (r) { + amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); + dev_warn(adev->dev, "(%d) pin RLC cp_table bo failed\n", r); + gfx_v7_0_rlc_fini(adev); + return r; + } + r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr); + if (r) { + dev_warn(adev->dev, "(%d) map RLC cp table bo failed\n", r); + gfx_v7_0_rlc_fini(adev); + return r; + } + + gfx_v7_0_init_cp_pg_table(adev); + + amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj); + amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); + + } + + return 0; +} + +static void gfx_v7_0_enable_lbpw(struct amdgpu_device *adev, bool enable) +{ + u32 tmp; + + tmp = RREG32(mmRLC_LB_CNTL); + if (enable) + tmp |= RLC_LB_CNTL__LOAD_BALANCE_ENABLE_MASK; + else + tmp &= ~RLC_LB_CNTL__LOAD_BALANCE_ENABLE_MASK; + WREG32(mmRLC_LB_CNTL, tmp); +} + +static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev) +{ + u32 i, j, k; + u32 mask; + + mutex_lock(&adev->grbm_idx_mutex); + for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { + for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { + gfx_v7_0_select_se_sh(adev, i, j); + for (k = 0; k < adev->usec_timeout; k++) { + if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0) + break; + udelay(1); + } + } + } + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); + + mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK | + RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK | + RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK | + RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK; + for (k = 0; k < adev->usec_timeout; k++) { + if ((RREG32(mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0) + break; + udelay(1); + } +} + +static void gfx_v7_0_update_rlc(struct amdgpu_device *adev, u32 rlc) +{ + u32 tmp; + + tmp = RREG32(mmRLC_CNTL); + if (tmp != rlc) + WREG32(mmRLC_CNTL, 
rlc); +} + +static u32 gfx_v7_0_halt_rlc(struct amdgpu_device *adev) +{ + u32 data, orig; + + orig = data = RREG32(mmRLC_CNTL); + + if (data & RLC_CNTL__RLC_ENABLE_F32_MASK) { + u32 i; + + data &= ~RLC_CNTL__RLC_ENABLE_F32_MASK; + WREG32(mmRLC_CNTL, data); + + for (i = 0; i < adev->usec_timeout; i++) { + if ((RREG32(mmRLC_GPM_STAT) & RLC_GPM_STAT__RLC_BUSY_MASK) == 0) + break; + udelay(1); + } + + gfx_v7_0_wait_for_rlc_serdes(adev); + } + + return orig; +} + +void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev) +{ + u32 tmp, i, mask; + + tmp = 0x1 | (1 << 1); + WREG32(mmRLC_GPR_REG2, tmp); + + mask = RLC_GPM_STAT__GFX_POWER_STATUS_MASK | + RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK; + for (i = 0; i < adev->usec_timeout; i++) { + if ((RREG32(mmRLC_GPM_STAT) & mask) == mask) + break; + udelay(1); + } + + for (i = 0; i < adev->usec_timeout; i++) { + if ((RREG32(mmRLC_GPR_REG2) & 0x1) == 0) + break; + udelay(1); + } +} + +void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev) +{ + u32 tmp; + + tmp = 0x1 | (0 << 1); + WREG32(mmRLC_GPR_REG2, tmp); +} + +/** + * gfx_v7_0_rlc_stop - stop the RLC ME + * + * @adev: amdgpu_device pointer + * + * Halt the RLC ME (MicroEngine) (CIK). + */ +void gfx_v7_0_rlc_stop(struct amdgpu_device *adev) +{ + WREG32(mmRLC_CNTL, 0); + + gfx_v7_0_enable_gui_idle_interrupt(adev, false); + + gfx_v7_0_wait_for_rlc_serdes(adev); +} + +/** + * gfx_v7_0_rlc_start - start the RLC ME + * + * @adev: amdgpu_device pointer + * + * Unhalt the RLC ME (MicroEngine) (CIK). + */ +static void gfx_v7_0_rlc_start(struct amdgpu_device *adev) +{ + WREG32(mmRLC_CNTL, RLC_CNTL__RLC_ENABLE_F32_MASK); + + gfx_v7_0_enable_gui_idle_interrupt(adev, true); + + udelay(50); +} + +static void gfx_v7_0_rlc_reset(struct amdgpu_device *adev) +{ + u32 tmp = RREG32(mmGRBM_SOFT_RESET); + + tmp |= GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK; + WREG32(mmGRBM_SOFT_RESET, tmp); + udelay(50); + tmp &= ~GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK; + WREG32(mmGRBM_SOFT_RESET, tmp); + udelay(50); +} + +/** + * gfx_v7_0_rlc_resume - setup the RLC hw + * + * @adev: amdgpu_device pointer + * + * Initialize the RLC registers, load the ucode, + * and start the RLC (CIK). + * Returns 0 for success, -EINVAL if the ucode is not available. 
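+ *
+ * The GPM ucode is streamed one dword at a time through
+ * RLC_GPM_UCODE_ADDR/RLC_GPM_UCODE_DATA, and the firmware version is
+ * left in RLC_GPM_UCODE_ADDR once the upload is complete.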
+ */ +static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev) +{ + const struct rlc_firmware_header_v1_0 *hdr; + const __le32 *fw_data; + unsigned i, fw_size; + u32 tmp; + + if (!adev->gfx.rlc_fw) + return -EINVAL; + + hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data; + amdgpu_ucode_print_rlc_hdr(&hdr->header); + adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version); + + gfx_v7_0_rlc_stop(adev); + + /* disable CG */ + tmp = RREG32(mmRLC_CGCG_CGLS_CTRL) & 0xfffffffc; + WREG32(mmRLC_CGCG_CGLS_CTRL, tmp); + + gfx_v7_0_rlc_reset(adev); + + gfx_v7_0_init_pg(adev); + + WREG32(mmRLC_LB_CNTR_INIT, 0); + WREG32(mmRLC_LB_CNTR_MAX, 0x00008000); + + mutex_lock(&adev->grbm_idx_mutex); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + WREG32(mmRLC_LB_INIT_CU_MASK, 0xffffffff); + WREG32(mmRLC_LB_PARAMS, 0x00600408); + WREG32(mmRLC_LB_CNTL, 0x80000004); + mutex_unlock(&adev->grbm_idx_mutex); + + WREG32(mmRLC_MC_CNTL, 0); + WREG32(mmRLC_UCODE_CNTL, 0); + + fw_data = (const __le32 *) + (adev->gfx.rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; + WREG32(mmRLC_GPM_UCODE_ADDR, 0); + for (i = 0; i < fw_size; i++) + WREG32(mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++)); + WREG32(mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version); + + /* XXX - find out what chips support lbpw */ + gfx_v7_0_enable_lbpw(adev, false); + + if (adev->asic_type == CHIP_BONAIRE) + WREG32(mmRLC_DRIVER_CPDMA_STATUS, 0); + + gfx_v7_0_rlc_start(adev); + + return 0; +} + +static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable) +{ + u32 data, orig, tmp, tmp2; + + orig = data = RREG32(mmRLC_CGCG_CGLS_CTRL); + + if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGCG)) { + gfx_v7_0_enable_gui_idle_interrupt(adev, true); + + tmp = gfx_v7_0_halt_rlc(adev); + + mutex_lock(&adev->grbm_idx_mutex); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); + WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); + tmp2 = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | + RLC_SERDES_WR_CTRL__CGCG_OVERRIDE_0_MASK | + RLC_SERDES_WR_CTRL__CGLS_ENABLE_MASK; + WREG32(mmRLC_SERDES_WR_CTRL, tmp2); + mutex_unlock(&adev->grbm_idx_mutex); + + gfx_v7_0_update_rlc(adev, tmp); + + data |= RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK; + } else { + gfx_v7_0_enable_gui_idle_interrupt(adev, false); + + RREG32(mmCB_CGTT_SCLK_CTRL); + RREG32(mmCB_CGTT_SCLK_CTRL); + RREG32(mmCB_CGTT_SCLK_CTRL); + RREG32(mmCB_CGTT_SCLK_CTRL); + + data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK); + } + + if (orig != data) + WREG32(mmRLC_CGCG_CGLS_CTRL, data); + +} + +static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable) +{ + u32 data, orig, tmp = 0; + + if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGCG)) { + if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGLS) { + if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CP_LS) { + orig = data = RREG32(mmCP_MEM_SLP_CNTL); + data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; + if (orig != data) + WREG32(mmCP_MEM_SLP_CNTL, data); + } + } + + orig = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE); + data |= 0x00000001; + data &= 0xfffffffd; + if (orig != data) + WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data); + + tmp = gfx_v7_0_halt_rlc(adev); + + mutex_lock(&adev->grbm_idx_mutex); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); + 
WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); + data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | + RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_0_MASK; + WREG32(mmRLC_SERDES_WR_CTRL, data); + mutex_unlock(&adev->grbm_idx_mutex); + + gfx_v7_0_update_rlc(adev, tmp); + + if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGTS) { + orig = data = RREG32(mmCGTS_SM_CTRL_REG); + data &= ~CGTS_SM_CTRL_REG__SM_MODE_MASK; + data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT); + data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK; + data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK; + if ((adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGLS) && + (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGTS_LS)) + data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK; + data &= ~CGTS_SM_CTRL_REG__ON_MONITOR_ADD_MASK; + data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK; + data |= (0x96 << CGTS_SM_CTRL_REG__ON_MONITOR_ADD__SHIFT); + if (orig != data) + WREG32(mmCGTS_SM_CTRL_REG, data); + } + } else { + orig = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE); + data |= 0x00000003; + if (orig != data) + WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data); + + data = RREG32(mmRLC_MEM_SLP_CNTL); + if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) { + data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK; + WREG32(mmRLC_MEM_SLP_CNTL, data); + } + + data = RREG32(mmCP_MEM_SLP_CNTL); + if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) { + data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; + WREG32(mmCP_MEM_SLP_CNTL, data); + } + + orig = data = RREG32(mmCGTS_SM_CTRL_REG); + data |= CGTS_SM_CTRL_REG__OVERRIDE_MASK | CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK; + if (orig != data) + WREG32(mmCGTS_SM_CTRL_REG, data); + + tmp = gfx_v7_0_halt_rlc(adev); + + mutex_lock(&adev->grbm_idx_mutex); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); + WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); + data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_1_MASK; + WREG32(mmRLC_SERDES_WR_CTRL, data); + mutex_unlock(&adev->grbm_idx_mutex); + + gfx_v7_0_update_rlc(adev, tmp); + } +} + +static void gfx_v7_0_update_cg(struct amdgpu_device *adev, + bool enable) +{ + gfx_v7_0_enable_gui_idle_interrupt(adev, false); + /* order matters! 
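+ * MGCG is enabled before CGCG, and CGCG is disabled before MGCG,
+ * which is exactly what the two branches below do.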
*/ + if (enable) { + gfx_v7_0_enable_mgcg(adev, true); + gfx_v7_0_enable_cgcg(adev, true); + } else { + gfx_v7_0_enable_cgcg(adev, false); + gfx_v7_0_enable_mgcg(adev, false); + } + gfx_v7_0_enable_gui_idle_interrupt(adev, true); +} + +static void gfx_v7_0_enable_sclk_slowdown_on_pu(struct amdgpu_device *adev, + bool enable) +{ + u32 data, orig; + + orig = data = RREG32(mmRLC_PG_CNTL); + if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_RLC_SMU_HS)) + data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK; + else + data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK; + if (orig != data) + WREG32(mmRLC_PG_CNTL, data); +} + +static void gfx_v7_0_enable_sclk_slowdown_on_pd(struct amdgpu_device *adev, + bool enable) +{ + u32 data, orig; + + orig = data = RREG32(mmRLC_PG_CNTL); + if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_RLC_SMU_HS)) + data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK; + else + data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK; + if (orig != data) + WREG32(mmRLC_PG_CNTL, data); +} + +static void gfx_v7_0_enable_cp_pg(struct amdgpu_device *adev, bool enable) +{ + u32 data, orig; + + orig = data = RREG32(mmRLC_PG_CNTL); + if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_CP)) + data &= ~0x8000; + else + data |= 0x8000; + if (orig != data) + WREG32(mmRLC_PG_CNTL, data); +} + +static void gfx_v7_0_enable_gds_pg(struct amdgpu_device *adev, bool enable) +{ + u32 data, orig; + + orig = data = RREG32(mmRLC_PG_CNTL); + if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GDS)) + data &= ~0x2000; + else + data |= 0x2000; + if (orig != data) + WREG32(mmRLC_PG_CNTL, data); +} + +static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev) +{ + const __le32 *fw_data; + volatile u32 *dst_ptr; + int me, i, max_me = 4; + u32 bo_offset = 0; + u32 table_offset, table_size; + + if (adev->asic_type == CHIP_KAVERI) + max_me = 5; + + if (adev->gfx.rlc.cp_table_ptr == NULL) + return; + + /* write the cp table buffer */ + dst_ptr = adev->gfx.rlc.cp_table_ptr; + for (me = 0; me < max_me; me++) { + if (me == 0) { + const struct gfx_firmware_header_v1_0 *hdr = + (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data; + fw_data = (const __le32 *) + (adev->gfx.ce_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + table_offset = le32_to_cpu(hdr->jt_offset); + table_size = le32_to_cpu(hdr->jt_size); + } else if (me == 1) { + const struct gfx_firmware_header_v1_0 *hdr = + (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data; + fw_data = (const __le32 *) + (adev->gfx.pfp_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + table_offset = le32_to_cpu(hdr->jt_offset); + table_size = le32_to_cpu(hdr->jt_size); + } else if (me == 2) { + const struct gfx_firmware_header_v1_0 *hdr = + (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data; + fw_data = (const __le32 *) + (adev->gfx.me_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + table_offset = le32_to_cpu(hdr->jt_offset); + table_size = le32_to_cpu(hdr->jt_size); + } else if (me == 3) { + const struct gfx_firmware_header_v1_0 *hdr = + (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; + fw_data = (const __le32 *) + (adev->gfx.mec_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + table_offset = le32_to_cpu(hdr->jt_offset); + table_size = le32_to_cpu(hdr->jt_size); + } else { + const struct gfx_firmware_header_v1_0 *hdr = + (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data; + fw_data = (const __le32 *) + 
(adev->gfx.mec2_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + table_offset = le32_to_cpu(hdr->jt_offset); + table_size = le32_to_cpu(hdr->jt_size); + } + + for (i = 0; i < table_size; i ++) { + dst_ptr[bo_offset + i] = + cpu_to_le32(le32_to_cpu(fw_data[table_offset + i])); + } + + bo_offset += table_size; + } +} + +static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev, + bool enable) +{ + u32 data, orig; + + if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG)) { + orig = data = RREG32(mmRLC_PG_CNTL); + data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK; + if (orig != data) + WREG32(mmRLC_PG_CNTL, data); + + orig = data = RREG32(mmRLC_AUTO_PG_CTRL); + data |= RLC_AUTO_PG_CTRL__AUTO_PG_EN_MASK; + if (orig != data) + WREG32(mmRLC_AUTO_PG_CTRL, data); + } else { + orig = data = RREG32(mmRLC_PG_CNTL); + data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK; + if (orig != data) + WREG32(mmRLC_PG_CNTL, data); + + orig = data = RREG32(mmRLC_AUTO_PG_CTRL); + data &= ~RLC_AUTO_PG_CTRL__AUTO_PG_EN_MASK; + if (orig != data) + WREG32(mmRLC_AUTO_PG_CTRL, data); + + data = RREG32(mmDB_RENDER_CONTROL); + } +} + +static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev, + u32 se, u32 sh) +{ + u32 mask = 0, tmp, tmp1; + int i; + + gfx_v7_0_select_se_sh(adev, se, sh); + tmp = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG); + tmp1 = RREG32(mmGC_USER_SHADER_ARRAY_CONFIG); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + + tmp &= 0xffff0000; + + tmp |= tmp1; + tmp >>= 16; + + for (i = 0; i < adev->gfx.config.max_cu_per_sh; i ++) { + mask <<= 1; + mask |= 1; + } + + return (~tmp) & mask; +} + +static void gfx_v7_0_init_ao_cu_mask(struct amdgpu_device *adev) +{ + uint32_t tmp, active_cu_number; + struct amdgpu_cu_info cu_info; + + gfx_v7_0_get_cu_info(adev, &cu_info); + tmp = cu_info.ao_cu_mask; + active_cu_number = cu_info.number; + + WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, tmp); + + tmp = RREG32(mmRLC_MAX_PG_CU); + tmp &= ~RLC_MAX_PG_CU__MAX_POWERED_UP_CU_MASK; + tmp |= (active_cu_number << RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT); + WREG32(mmRLC_MAX_PG_CU, tmp); +} + +static void gfx_v7_0_enable_gfx_static_mgpg(struct amdgpu_device *adev, + bool enable) +{ + u32 data, orig; + + orig = data = RREG32(mmRLC_PG_CNTL); + if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_SMG)) + data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK; + else + data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK; + if (orig != data) + WREG32(mmRLC_PG_CNTL, data); +} + +static void gfx_v7_0_enable_gfx_dynamic_mgpg(struct amdgpu_device *adev, + bool enable) +{ + u32 data, orig; + + orig = data = RREG32(mmRLC_PG_CNTL); + if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_DMG)) + data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK; + else + data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK; + if (orig != data) + WREG32(mmRLC_PG_CNTL, data); +} + +#define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90 +#define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET 0x3D + +static void gfx_v7_0_init_gfx_cgpg(struct amdgpu_device *adev) +{ + u32 data, orig; + u32 i; + + if (adev->gfx.rlc.cs_data) { + WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET); + WREG32(mmRLC_GPM_SCRATCH_DATA, upper_32_bits(adev->gfx.rlc.clear_state_gpu_addr)); + WREG32(mmRLC_GPM_SCRATCH_DATA, lower_32_bits(adev->gfx.rlc.clear_state_gpu_addr)); + WREG32(mmRLC_GPM_SCRATCH_DATA, adev->gfx.rlc.clear_state_size); + } else { + WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET); + for (i = 0; i < 3; i++) + WREG32(mmRLC_GPM_SCRATCH_DATA, 0); 
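+ /* without a clear state buffer the 3-dword descriptor
+ * (address hi/lo and size) is simply zeroed out above */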
+ } + if (adev->gfx.rlc.reg_list) { + WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_SAVE_AND_RESTORE_STARTING_OFFSET); + for (i = 0; i < adev->gfx.rlc.reg_list_size; i++) + WREG32(mmRLC_GPM_SCRATCH_DATA, adev->gfx.rlc.reg_list[i]); + } + + orig = data = RREG32(mmRLC_PG_CNTL); + data |= RLC_PG_CNTL__GFX_POWER_GATING_SRC_MASK; + if (orig != data) + WREG32(mmRLC_PG_CNTL, data); + + WREG32(mmRLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8); + WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8); + + data = RREG32(mmCP_RB_WPTR_POLL_CNTL); + data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK; + data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT); + WREG32(mmCP_RB_WPTR_POLL_CNTL, data); + + data = 0x10101010; + WREG32(mmRLC_PG_DELAY, data); + + data = RREG32(mmRLC_PG_DELAY_2); + data &= ~0xff; + data |= 0x3; + WREG32(mmRLC_PG_DELAY_2, data); + + data = RREG32(mmRLC_AUTO_PG_CTRL); + data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK; + data |= (0x700 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT); + WREG32(mmRLC_AUTO_PG_CTRL, data); + +} + +static void gfx_v7_0_update_gfx_pg(struct amdgpu_device *adev, bool enable) +{ + gfx_v7_0_enable_gfx_cgpg(adev, enable); + gfx_v7_0_enable_gfx_static_mgpg(adev, enable); + gfx_v7_0_enable_gfx_dynamic_mgpg(adev, enable); +} + +static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev) +{ + u32 count = 0; + const struct cs_section_def *sect = NULL; + const struct cs_extent_def *ext = NULL; + + if (adev->gfx.rlc.cs_data == NULL) + return 0; + + /* begin clear state */ + count += 2; + /* context control state */ + count += 3; + + for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) { + for (ext = sect->section; ext->extent != NULL; ++ext) { + if (sect->id == SECT_CONTEXT) + count += 2 + ext->reg_count; + else + return 0; + } + } + /* pa_sc_raster_config/pa_sc_raster_config1 */ + count += 4; + /* end clear state */ + count += 2; + /* clear state */ + count += 2; + + return count; +} + +static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, + volatile u32 *buffer) +{ + u32 count = 0, i; + const struct cs_section_def *sect = NULL; + const struct cs_extent_def *ext = NULL; + + if (adev->gfx.rlc.cs_data == NULL) + return; + if (buffer == NULL) + return; + + buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0)); + buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE); + + buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1)); + buffer[count++] = cpu_to_le32(0x80000000); + buffer[count++] = cpu_to_le32(0x80000000); + + for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) { + for (ext = sect->section; ext->extent != NULL; ++ext) { + if (sect->id == SECT_CONTEXT) { + buffer[count++] = + cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count)); + buffer[count++] = cpu_to_le32(ext->reg_index - PACKET3_SET_CONTEXT_REG_START); + for (i = 0; i < ext->reg_count; i++) + buffer[count++] = cpu_to_le32(ext->extent[i]); + } else { + return; + } + } + } + + buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2)); + buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START); + switch (adev->asic_type) { + case CHIP_BONAIRE: + buffer[count++] = cpu_to_le32(0x16000012); + buffer[count++] = cpu_to_le32(0x00000000); + break; + case CHIP_KAVERI: + buffer[count++] = cpu_to_le32(0x00000000); /* XXX */ + buffer[count++] = cpu_to_le32(0x00000000); + break; + case CHIP_KABINI: + case CHIP_MULLINS: + buffer[count++] = 
cpu_to_le32(0x00000000); /* XXX */ + buffer[count++] = cpu_to_le32(0x00000000); + break; + case CHIP_HAWAII: + buffer[count++] = cpu_to_le32(0x3a00161a); + buffer[count++] = cpu_to_le32(0x0000002e); + break; + default: + buffer[count++] = cpu_to_le32(0x00000000); + buffer[count++] = cpu_to_le32(0x00000000); + break; + } + + buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0)); + buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE); + + buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0)); + buffer[count++] = cpu_to_le32(0); +} + +static void gfx_v7_0_init_pg(struct amdgpu_device *adev) +{ + if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG | + AMDGPU_PG_SUPPORT_GFX_SMG | + AMDGPU_PG_SUPPORT_GFX_DMG | + AMDGPU_PG_SUPPORT_CP | + AMDGPU_PG_SUPPORT_GDS | + AMDGPU_PG_SUPPORT_RLC_SMU_HS)) { + gfx_v7_0_enable_sclk_slowdown_on_pu(adev, true); + gfx_v7_0_enable_sclk_slowdown_on_pd(adev, true); + if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) { + gfx_v7_0_init_gfx_cgpg(adev); + gfx_v7_0_enable_cp_pg(adev, true); + gfx_v7_0_enable_gds_pg(adev, true); + } + gfx_v7_0_init_ao_cu_mask(adev); + gfx_v7_0_update_gfx_pg(adev, true); + } +} + +static void gfx_v7_0_fini_pg(struct amdgpu_device *adev) +{ + if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG | + AMDGPU_PG_SUPPORT_GFX_SMG | + AMDGPU_PG_SUPPORT_GFX_DMG | + AMDGPU_PG_SUPPORT_CP | + AMDGPU_PG_SUPPORT_GDS | + AMDGPU_PG_SUPPORT_RLC_SMU_HS)) { + gfx_v7_0_update_gfx_pg(adev, false); + if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) { + gfx_v7_0_enable_cp_pg(adev, false); + gfx_v7_0_enable_gds_pg(adev, false); + } + } +} + +/** + * gfx_v7_0_get_gpu_clock_counter - return GPU clock counter snapshot + * + * @adev: amdgpu_device pointer + * + * Fetches a GPU clock counter snapshot (SI). + * Returns the 64 bit clock counter snapshot. 
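+ *
+ * Writing RLC_CAPTURE_GPU_CLOCK_COUNT captures the current counter so
+ * that the LSB/MSB pair can be read back consistently; gpu_clock_mutex
+ * serializes concurrent callers.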
+ */ +uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev) +{ + uint64_t clock; + + mutex_lock(&adev->gfx.gpu_clock_mutex); + WREG32(mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1); + clock = (uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_LSB) | + ((uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL); + mutex_unlock(&adev->gfx.gpu_clock_mutex); + return clock; +} + +static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring, + uint32_t vmid, + uint32_t gds_base, uint32_t gds_size, + uint32_t gws_base, uint32_t gws_size, + uint32_t oa_base, uint32_t oa_size) +{ + gds_base = gds_base >> AMDGPU_GDS_SHIFT; + gds_size = gds_size >> AMDGPU_GDS_SHIFT; + + gws_base = gws_base >> AMDGPU_GWS_SHIFT; + gws_size = gws_size >> AMDGPU_GWS_SHIFT; + + oa_base = oa_base >> AMDGPU_OA_SHIFT; + oa_size = oa_size >> AMDGPU_OA_SHIFT; + + /* GDS Base */ + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); + amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | + WRITE_DATA_DST_SEL(0))); + amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_base); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, gds_base); + + /* GDS Size */ + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); + amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | + WRITE_DATA_DST_SEL(0))); + amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_size); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, gds_size); + + /* GWS */ + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); + amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | + WRITE_DATA_DST_SEL(0))); + amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].gws); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base); + + /* OA */ + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); + amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | + WRITE_DATA_DST_SEL(0))); + amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].oa); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base)); +} + +static int gfx_v7_0_early_init(struct amdgpu_device *adev) +{ + + adev->gfx.num_gfx_rings = GFX7_NUM_GFX_RINGS; + adev->gfx.num_compute_rings = GFX7_NUM_COMPUTE_RINGS; + gfx_v7_0_set_ring_funcs(adev); + gfx_v7_0_set_irq_funcs(adev); + gfx_v7_0_set_gds_init(adev); + + return 0; +} + +static int gfx_v7_0_sw_init(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring; + int i, r; + + /* EOP Event */ + r = amdgpu_irq_add_id(adev, 181, &adev->gfx.eop_irq); + if (r) + return r; + + /* Privileged reg */ + r = amdgpu_irq_add_id(adev, 184, &adev->gfx.priv_reg_irq); + if (r) + return r; + + /* Privileged inst */ + r = amdgpu_irq_add_id(adev, 185, &adev->gfx.priv_inst_irq); + if (r) + return r; + + gfx_v7_0_scratch_init(adev); + + r = gfx_v7_0_init_microcode(adev); + if (r) { + DRM_ERROR("Failed to load gfx firmware!\n"); + return r; + } + + r = gfx_v7_0_rlc_init(adev); + if (r) { + DRM_ERROR("Failed to init rlc BOs!\n"); + return r; + } + + /* allocate mec buffers */ + r = gfx_v7_0_mec_init(adev); + if (r) { + DRM_ERROR("Failed to init MEC BOs!\n"); + return r; + } + + r = amdgpu_wb_get(adev, &adev->gfx.ce_sync_offs); + if (r) { + DRM_ERROR("(%d) gfx.ce_sync_offs wb alloc failed\n", r); + return r; + } + + for (i = 0; i < adev->gfx.num_gfx_rings; i++) { + ring = &adev->gfx.gfx_ring[i]; + ring->ring_obj = NULL; + sprintf(ring->name, "gfx"); + r = amdgpu_ring_init(adev, ring, 1024 * 1024, + PACKET3(PACKET3_NOP, 0x3FFF), 0xf, + &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP, + 
AMDGPU_RING_TYPE_GFX); + if (r) + return r; + } + + /* set up the compute queues */ + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + unsigned irq_type; + + /* max 32 queues per MEC */ + if ((i >= 32) || (i >= AMDGPU_MAX_COMPUTE_RINGS)) { + DRM_ERROR("Too many (%d) compute rings!\n", i); + break; + } + ring = &adev->gfx.compute_ring[i]; + ring->ring_obj = NULL; + ring->use_doorbell = true; + ring->doorbell_index = AMDGPU_DOORBELL_MEC_RING0 + i; + ring->me = 1; /* first MEC */ + ring->pipe = i / 8; + ring->queue = i % 8; + sprintf(ring->name, "comp %d.%d.%d", ring->me, ring->pipe, ring->queue); + irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe; + /* type-2 packets are deprecated on MEC, use type-3 instead */ + r = amdgpu_ring_init(adev, ring, 1024 * 1024, + PACKET3(PACKET3_NOP, 0x3FFF), 0xf, + &adev->gfx.eop_irq, irq_type, + AMDGPU_RING_TYPE_COMPUTE); + if (r) + return r; + } + + /* reserve GDS, GWS and OA resource for gfx */ + r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size, + PAGE_SIZE, true, + AMDGPU_GEM_DOMAIN_GDS, 0, + NULL, &adev->gds.gds_gfx_bo); + if (r) + return r; + + r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size, + PAGE_SIZE, true, + AMDGPU_GEM_DOMAIN_GWS, 0, + NULL, &adev->gds.gws_gfx_bo); + if (r) + return r; + + r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size, + PAGE_SIZE, true, + AMDGPU_GEM_DOMAIN_OA, 0, + NULL, &adev->gds.oa_gfx_bo); + if (r) + return r; + + return r; +} + +static int gfx_v7_0_sw_fini(struct amdgpu_device *adev) +{ + int i; + + amdgpu_bo_unref(&adev->gds.oa_gfx_bo); + amdgpu_bo_unref(&adev->gds.gws_gfx_bo); + amdgpu_bo_unref(&adev->gds.gds_gfx_bo); + + for (i = 0; i < adev->gfx.num_gfx_rings; i++) + amdgpu_ring_fini(&adev->gfx.gfx_ring[i]); + for (i = 0; i < adev->gfx.num_compute_rings; i++) + amdgpu_ring_fini(&adev->gfx.compute_ring[i]); + + amdgpu_wb_free(adev, adev->gfx.ce_sync_offs); + + gfx_v7_0_cp_compute_fini(adev); + gfx_v7_0_rlc_fini(adev); + gfx_v7_0_mec_fini(adev); + + return 0; +} + +static int gfx_v7_0_hw_init(struct amdgpu_device *adev) +{ + int r; + + gfx_v7_0_gpu_init(adev); + + /* init rlc */ + r = gfx_v7_0_rlc_resume(adev); + if (r) + return r; + + r = gfx_v7_0_cp_resume(adev); + if (r) + return r; + + return r; +} + +static int gfx_v7_0_hw_fini(struct amdgpu_device *adev) +{ + gfx_v7_0_cp_enable(adev, false); + gfx_v7_0_rlc_stop(adev); + gfx_v7_0_fini_pg(adev); + + return 0; +} + +static int gfx_v7_0_suspend(struct amdgpu_device *adev) +{ + return gfx_v7_0_hw_fini(adev); +} + +static int gfx_v7_0_resume(struct amdgpu_device *adev) +{ + return gfx_v7_0_hw_init(adev); +} + +static bool gfx_v7_0_is_idle(struct amdgpu_device *adev) +{ + if (RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK) + return false; + else + return true; +} + +static int gfx_v7_0_wait_for_idle(struct amdgpu_device *adev) +{ + unsigned i; + u32 tmp; + + for (i = 0; i < adev->usec_timeout; i++) { + /* read MC_STATUS */ + tmp = RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK; + + if (!tmp) + return 0; + udelay(1); + } + return -ETIMEDOUT; +} + +static void gfx_v7_0_print_status(struct amdgpu_device *adev) +{ + int i; + + dev_info(adev->dev, "GFX 7.x registers\n"); + dev_info(adev->dev, " GRBM_STATUS=0x%08X\n", + RREG32(mmGRBM_STATUS)); + dev_info(adev->dev, " GRBM_STATUS2=0x%08X\n", + RREG32(mmGRBM_STATUS2)); + dev_info(adev->dev, " GRBM_STATUS_SE0=0x%08X\n", + RREG32(mmGRBM_STATUS_SE0)); + dev_info(adev->dev, " GRBM_STATUS_SE1=0x%08X\n", + RREG32(mmGRBM_STATUS_SE1)); + dev_info(adev->dev, " 
GRBM_STATUS_SE2=0x%08X\n", + RREG32(mmGRBM_STATUS_SE2)); + dev_info(adev->dev, " GRBM_STATUS_SE3=0x%08X\n", + RREG32(mmGRBM_STATUS_SE3)); + dev_info(adev->dev, " CP_STAT = 0x%08x\n", RREG32(mmCP_STAT)); + dev_info(adev->dev, " CP_STALLED_STAT1 = 0x%08x\n", + RREG32(mmCP_STALLED_STAT1)); + dev_info(adev->dev, " CP_STALLED_STAT2 = 0x%08x\n", + RREG32(mmCP_STALLED_STAT2)); + dev_info(adev->dev, " CP_STALLED_STAT3 = 0x%08x\n", + RREG32(mmCP_STALLED_STAT3)); + dev_info(adev->dev, " CP_CPF_BUSY_STAT = 0x%08x\n", + RREG32(mmCP_CPF_BUSY_STAT)); + dev_info(adev->dev, " CP_CPF_STALLED_STAT1 = 0x%08x\n", + RREG32(mmCP_CPF_STALLED_STAT1)); + dev_info(adev->dev, " CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS)); + dev_info(adev->dev, " CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPC_BUSY_STAT)); + dev_info(adev->dev, " CP_CPC_STALLED_STAT1 = 0x%08x\n", + RREG32(mmCP_CPC_STALLED_STAT1)); + dev_info(adev->dev, " CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS)); + + for (i = 0; i < 32; i++) { + dev_info(adev->dev, " GB_TILE_MODE%d=0x%08X\n", + i, RREG32(mmGB_TILE_MODE0 + (i * 4))); + } + for (i = 0; i < 16; i++) { + dev_info(adev->dev, " GB_MACROTILE_MODE%d=0x%08X\n", + i, RREG32(mmGB_MACROTILE_MODE0 + (i * 4))); + } + for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { + dev_info(adev->dev, " se: %d\n", i); + gfx_v7_0_select_se_sh(adev, i, 0xffffffff); + dev_info(adev->dev, " PA_SC_RASTER_CONFIG=0x%08X\n", + RREG32(mmPA_SC_RASTER_CONFIG)); + dev_info(adev->dev, " PA_SC_RASTER_CONFIG_1=0x%08X\n", + RREG32(mmPA_SC_RASTER_CONFIG_1)); + } + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + + dev_info(adev->dev, " GB_ADDR_CONFIG=0x%08X\n", + RREG32(mmGB_ADDR_CONFIG)); + dev_info(adev->dev, " HDP_ADDR_CONFIG=0x%08X\n", + RREG32(mmHDP_ADDR_CONFIG)); + dev_info(adev->dev, " DMIF_ADDR_CALC=0x%08X\n", + RREG32(mmDMIF_ADDR_CALC)); + dev_info(adev->dev, " SDMA0_TILING_CONFIG=0x%08X\n", + RREG32(mmSDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET)); + dev_info(adev->dev, " SDMA1_TILING_CONFIG=0x%08X\n", + RREG32(mmSDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET)); + dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n", + RREG32(mmUVD_UDEC_ADDR_CONFIG)); + dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n", + RREG32(mmUVD_UDEC_DB_ADDR_CONFIG)); + dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n", + RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG)); + + dev_info(adev->dev, " CP_MEQ_THRESHOLDS=0x%08X\n", + RREG32(mmCP_MEQ_THRESHOLDS)); + dev_info(adev->dev, " SX_DEBUG_1=0x%08X\n", + RREG32(mmSX_DEBUG_1)); + dev_info(adev->dev, " TA_CNTL_AUX=0x%08X\n", + RREG32(mmTA_CNTL_AUX)); + dev_info(adev->dev, " SPI_CONFIG_CNTL=0x%08X\n", + RREG32(mmSPI_CONFIG_CNTL)); + dev_info(adev->dev, " SQ_CONFIG=0x%08X\n", + RREG32(mmSQ_CONFIG)); + dev_info(adev->dev, " DB_DEBUG=0x%08X\n", + RREG32(mmDB_DEBUG)); + dev_info(adev->dev, " DB_DEBUG2=0x%08X\n", + RREG32(mmDB_DEBUG2)); + dev_info(adev->dev, " DB_DEBUG3=0x%08X\n", + RREG32(mmDB_DEBUG3)); + dev_info(adev->dev, " CB_HW_CONTROL=0x%08X\n", + RREG32(mmCB_HW_CONTROL)); + dev_info(adev->dev, " SPI_CONFIG_CNTL_1=0x%08X\n", + RREG32(mmSPI_CONFIG_CNTL_1)); + dev_info(adev->dev, " PA_SC_FIFO_SIZE=0x%08X\n", + RREG32(mmPA_SC_FIFO_SIZE)); + dev_info(adev->dev, " VGT_NUM_INSTANCES=0x%08X\n", + RREG32(mmVGT_NUM_INSTANCES)); + dev_info(adev->dev, " CP_PERFMON_CNTL=0x%08X\n", + RREG32(mmCP_PERFMON_CNTL)); + dev_info(adev->dev, " PA_SC_FORCE_EOV_MAX_CNTS=0x%08X\n", + RREG32(mmPA_SC_FORCE_EOV_MAX_CNTS)); + dev_info(adev->dev, " VGT_CACHE_INVALIDATION=0x%08X\n", + 
RREG32(mmVGT_CACHE_INVALIDATION)); + dev_info(adev->dev, " VGT_GS_VERTEX_REUSE=0x%08X\n", + RREG32(mmVGT_GS_VERTEX_REUSE)); + dev_info(adev->dev, " PA_SC_LINE_STIPPLE_STATE=0x%08X\n", + RREG32(mmPA_SC_LINE_STIPPLE_STATE)); + dev_info(adev->dev, " PA_CL_ENHANCE=0x%08X\n", + RREG32(mmPA_CL_ENHANCE)); + dev_info(adev->dev, " PA_SC_ENHANCE=0x%08X\n", + RREG32(mmPA_SC_ENHANCE)); + + dev_info(adev->dev, " CP_ME_CNTL=0x%08X\n", + RREG32(mmCP_ME_CNTL)); + dev_info(adev->dev, " CP_MAX_CONTEXT=0x%08X\n", + RREG32(mmCP_MAX_CONTEXT)); + dev_info(adev->dev, " CP_ENDIAN_SWAP=0x%08X\n", + RREG32(mmCP_ENDIAN_SWAP)); + dev_info(adev->dev, " CP_DEVICE_ID=0x%08X\n", + RREG32(mmCP_DEVICE_ID)); + + dev_info(adev->dev, " CP_SEM_WAIT_TIMER=0x%08X\n", + RREG32(mmCP_SEM_WAIT_TIMER)); + if (adev->asic_type != CHIP_HAWAII) + dev_info(adev->dev, " CP_SEM_INCOMPLETE_TIMER_CNTL=0x%08X\n", + RREG32(mmCP_SEM_INCOMPLETE_TIMER_CNTL)); + + dev_info(adev->dev, " CP_RB_WPTR_DELAY=0x%08X\n", + RREG32(mmCP_RB_WPTR_DELAY)); + dev_info(adev->dev, " CP_RB_VMID=0x%08X\n", + RREG32(mmCP_RB_VMID)); + dev_info(adev->dev, " CP_RB0_CNTL=0x%08X\n", + RREG32(mmCP_RB0_CNTL)); + dev_info(adev->dev, " CP_RB0_WPTR=0x%08X\n", + RREG32(mmCP_RB0_WPTR)); + dev_info(adev->dev, " CP_RB0_RPTR_ADDR=0x%08X\n", + RREG32(mmCP_RB0_RPTR_ADDR)); + dev_info(adev->dev, " CP_RB0_RPTR_ADDR_HI=0x%08X\n", + RREG32(mmCP_RB0_RPTR_ADDR_HI)); + dev_info(adev->dev, " CP_RB0_CNTL=0x%08X\n", + RREG32(mmCP_RB0_CNTL)); + dev_info(adev->dev, " CP_RB0_BASE=0x%08X\n", + RREG32(mmCP_RB0_BASE)); + dev_info(adev->dev, " CP_RB0_BASE_HI=0x%08X\n", + RREG32(mmCP_RB0_BASE_HI)); + dev_info(adev->dev, " CP_MEC_CNTL=0x%08X\n", + RREG32(mmCP_MEC_CNTL)); + dev_info(adev->dev, " CP_CPF_DEBUG=0x%08X\n", + RREG32(mmCP_CPF_DEBUG)); + + dev_info(adev->dev, " SCRATCH_ADDR=0x%08X\n", + RREG32(mmSCRATCH_ADDR)); + dev_info(adev->dev, " SCRATCH_UMSK=0x%08X\n", + RREG32(mmSCRATCH_UMSK)); + + /* init the pipes */ + mutex_lock(&adev->srbm_mutex); + for (i = 0; i < (adev->gfx.mec.num_pipe * adev->gfx.mec.num_mec); i++) { + int me = (i < 4) ? 1 : 2; + int pipe = (i < 4) ? 
i : (i - 4); + int queue; + + dev_info(adev->dev, " me: %d, pipe: %d\n", me, pipe); + cik_srbm_select(adev, me, pipe, 0, 0); + dev_info(adev->dev, " CP_HPD_EOP_BASE_ADDR=0x%08X\n", + RREG32(mmCP_HPD_EOP_BASE_ADDR)); + dev_info(adev->dev, " CP_HPD_EOP_BASE_ADDR_HI=0x%08X\n", + RREG32(mmCP_HPD_EOP_BASE_ADDR_HI)); + dev_info(adev->dev, " CP_HPD_EOP_VMID=0x%08X\n", + RREG32(mmCP_HPD_EOP_VMID)); + dev_info(adev->dev, " CP_HPD_EOP_CONTROL=0x%08X\n", + RREG32(mmCP_HPD_EOP_CONTROL)); + + for (queue = 0; queue < 8; queue++) { + cik_srbm_select(adev, me, pipe, queue, 0); + dev_info(adev->dev, " queue: %d\n", queue); + dev_info(adev->dev, " CP_PQ_WPTR_POLL_CNTL=0x%08X\n", + RREG32(mmCP_PQ_WPTR_POLL_CNTL)); + dev_info(adev->dev, " CP_HQD_PQ_DOORBELL_CONTROL=0x%08X\n", + RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL)); + dev_info(adev->dev, " CP_HQD_ACTIVE=0x%08X\n", + RREG32(mmCP_HQD_ACTIVE)); + dev_info(adev->dev, " CP_HQD_DEQUEUE_REQUEST=0x%08X\n", + RREG32(mmCP_HQD_DEQUEUE_REQUEST)); + dev_info(adev->dev, " CP_HQD_PQ_RPTR=0x%08X\n", + RREG32(mmCP_HQD_PQ_RPTR)); + dev_info(adev->dev, " CP_HQD_PQ_WPTR=0x%08X\n", + RREG32(mmCP_HQD_PQ_WPTR)); + dev_info(adev->dev, " CP_HQD_PQ_BASE=0x%08X\n", + RREG32(mmCP_HQD_PQ_BASE)); + dev_info(adev->dev, " CP_HQD_PQ_BASE_HI=0x%08X\n", + RREG32(mmCP_HQD_PQ_BASE_HI)); + dev_info(adev->dev, " CP_HQD_PQ_CONTROL=0x%08X\n", + RREG32(mmCP_HQD_PQ_CONTROL)); + dev_info(adev->dev, " CP_HQD_PQ_WPTR_POLL_ADDR=0x%08X\n", + RREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR)); + dev_info(adev->dev, " CP_HQD_PQ_WPTR_POLL_ADDR_HI=0x%08X\n", + RREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR_HI)); + dev_info(adev->dev, " CP_HQD_PQ_RPTR_REPORT_ADDR=0x%08X\n", + RREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR)); + dev_info(adev->dev, " CP_HQD_PQ_RPTR_REPORT_ADDR_HI=0x%08X\n", + RREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI)); + dev_info(adev->dev, " CP_HQD_PQ_DOORBELL_CONTROL=0x%08X\n", + RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL)); + dev_info(adev->dev, " CP_HQD_PQ_WPTR=0x%08X\n", + RREG32(mmCP_HQD_PQ_WPTR)); + dev_info(adev->dev, " CP_HQD_VMID=0x%08X\n", + RREG32(mmCP_HQD_VMID)); + dev_info(adev->dev, " CP_MQD_BASE_ADDR=0x%08X\n", + RREG32(mmCP_MQD_BASE_ADDR)); + dev_info(adev->dev, " CP_MQD_BASE_ADDR_HI=0x%08X\n", + RREG32(mmCP_MQD_BASE_ADDR_HI)); + dev_info(adev->dev, " CP_MQD_CONTROL=0x%08X\n", + RREG32(mmCP_MQD_CONTROL)); + } + } + cik_srbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + + dev_info(adev->dev, " CP_INT_CNTL_RING0=0x%08X\n", + RREG32(mmCP_INT_CNTL_RING0)); + dev_info(adev->dev, " RLC_LB_CNTL=0x%08X\n", + RREG32(mmRLC_LB_CNTL)); + dev_info(adev->dev, " RLC_CNTL=0x%08X\n", + RREG32(mmRLC_CNTL)); + dev_info(adev->dev, " RLC_CGCG_CGLS_CTRL=0x%08X\n", + RREG32(mmRLC_CGCG_CGLS_CTRL)); + dev_info(adev->dev, " RLC_LB_CNTR_INIT=0x%08X\n", + RREG32(mmRLC_LB_CNTR_INIT)); + dev_info(adev->dev, " RLC_LB_CNTR_MAX=0x%08X\n", + RREG32(mmRLC_LB_CNTR_MAX)); + dev_info(adev->dev, " RLC_LB_INIT_CU_MASK=0x%08X\n", + RREG32(mmRLC_LB_INIT_CU_MASK)); + dev_info(adev->dev, " RLC_LB_PARAMS=0x%08X\n", + RREG32(mmRLC_LB_PARAMS)); + dev_info(adev->dev, " RLC_LB_CNTL=0x%08X\n", + RREG32(mmRLC_LB_CNTL)); + dev_info(adev->dev, " RLC_MC_CNTL=0x%08X\n", + RREG32(mmRLC_MC_CNTL)); + dev_info(adev->dev, " RLC_UCODE_CNTL=0x%08X\n", + RREG32(mmRLC_UCODE_CNTL)); + + if (adev->asic_type == CHIP_BONAIRE) + dev_info(adev->dev, " RLC_DRIVER_CPDMA_STATUS=0x%08X\n", + RREG32(mmRLC_DRIVER_CPDMA_STATUS)); + + mutex_lock(&adev->srbm_mutex); + for (i = 0; i < 16; i++) { + cik_srbm_select(adev, 0, 0, 0, i); + dev_info(adev->dev, " VM %d:\n", i); + dev_info(adev->dev, 
" SH_MEM_CONFIG=0x%08X\n", + RREG32(mmSH_MEM_CONFIG)); + dev_info(adev->dev, " SH_MEM_APE1_BASE=0x%08X\n", + RREG32(mmSH_MEM_APE1_BASE)); + dev_info(adev->dev, " SH_MEM_APE1_LIMIT=0x%08X\n", + RREG32(mmSH_MEM_APE1_LIMIT)); + dev_info(adev->dev, " SH_MEM_BASES=0x%08X\n", + RREG32(mmSH_MEM_BASES)); + } + cik_srbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); +} + +static int gfx_v7_0_soft_reset(struct amdgpu_device *adev) +{ + u32 grbm_soft_reset = 0, srbm_soft_reset = 0; + u32 tmp; + + /* GRBM_STATUS */ + tmp = RREG32(mmGRBM_STATUS); + if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK | + GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK | + GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK | + GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK | + GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK | + GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) + grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK | + GRBM_SOFT_RESET__SOFT_RESET_GFX_MASK; + + if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) { + grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK; + srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK; + } + + /* GRBM_STATUS2 */ + tmp = RREG32(mmGRBM_STATUS2); + if (tmp & GRBM_STATUS2__RLC_BUSY_MASK) + grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK; + + /* SRBM_STATUS */ + tmp = RREG32(mmSRBM_STATUS); + if (tmp & SRBM_STATUS__GRBM_RQ_PENDING_MASK) + srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK; + + if (grbm_soft_reset || srbm_soft_reset) { + gfx_v7_0_print_status(adev); + /* disable CG/PG */ + gfx_v7_0_fini_pg(adev); + gfx_v7_0_update_cg(adev, false); + + /* stop the rlc */ + gfx_v7_0_rlc_stop(adev); + + /* Disable GFX parsing/prefetching */ + WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK); + + /* Disable MEC parsing/prefetching */ + WREG32(mmCP_MEC_CNTL, CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK); + + if (grbm_soft_reset) { + tmp = RREG32(mmGRBM_SOFT_RESET); + tmp |= grbm_soft_reset; + dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32(mmGRBM_SOFT_RESET, tmp); + tmp = RREG32(mmGRBM_SOFT_RESET); + + udelay(50); + + tmp &= ~grbm_soft_reset; + WREG32(mmGRBM_SOFT_RESET, tmp); + tmp = RREG32(mmGRBM_SOFT_RESET); + } + + if (srbm_soft_reset) { + tmp = RREG32(mmSRBM_SOFT_RESET); + tmp |= srbm_soft_reset; + dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + udelay(50); + + tmp &= ~srbm_soft_reset; + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + } + /* Wait a little for things to settle down */ + udelay(50); + gfx_v7_0_print_status(adev); + } + return 0; +} + +static void gfx_v7_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, + enum amdgpu_interrupt_state state) +{ + u32 cp_int_cntl; + + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); + cp_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK; + WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); + break; + case AMDGPU_IRQ_STATE_ENABLE: + cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); + cp_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK; + WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); + break; + default: + break; + } +} + +static void gfx_v7_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev, + int me, int pipe, + enum amdgpu_interrupt_state state) +{ + u32 mec_int_cntl, 
mec_int_cntl_reg; + + /* + * amdgpu controls only pipe 0 of MEC1. That's why this function only + * handles the setting of interrupts for this specific pipe. All other + * pipes' interrupts are set by amdkfd. + */ + + if (me == 1) { + switch (pipe) { + case 0: + mec_int_cntl_reg = mmCP_ME1_PIPE0_INT_CNTL; + break; + default: + DRM_DEBUG("invalid pipe %d\n", pipe); + return; + } + } else { + DRM_DEBUG("invalid me %d\n", me); + return; + } + + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + mec_int_cntl = RREG32(mec_int_cntl_reg); + mec_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK; + WREG32(mec_int_cntl_reg, mec_int_cntl); + break; + case AMDGPU_IRQ_STATE_ENABLE: + mec_int_cntl = RREG32(mec_int_cntl_reg); + mec_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK; + WREG32(mec_int_cntl_reg, mec_int_cntl); + break; + default: + break; + } +} + +static int gfx_v7_0_set_priv_reg_fault_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned type, + enum amdgpu_interrupt_state state) +{ + u32 cp_int_cntl; + + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); + cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK; + WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); + break; + case AMDGPU_IRQ_STATE_ENABLE: + cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); + cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK; + WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); + break; + default: + break; + } + + return 0; +} + +static int gfx_v7_0_set_priv_inst_fault_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned type, + enum amdgpu_interrupt_state state) +{ + u32 cp_int_cntl; + + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); + cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK; + WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); + break; + case AMDGPU_IRQ_STATE_ENABLE: + cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); + cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK; + WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); + break; + default: + break; + } + + return 0; +} + +static int gfx_v7_0_set_eop_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned type, + enum amdgpu_interrupt_state state) +{ + switch (type) { + case AMDGPU_CP_IRQ_GFX_EOP: + gfx_v7_0_set_gfx_eop_interrupt_state(adev, state); + break; + case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP: + gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 0, state); + break; + case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP: + gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 1, state); + break; + case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP: + gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 2, state); + break; + case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP: + gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 3, state); + break; + case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP: + gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 0, state); + break; + case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP: + gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 1, state); + break; + case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP: + gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 2, state); + break; + case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP: + gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 3, state); + break; + default: + break; + } + return 0; +} + +static int gfx_v7_0_eop_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + u8 me_id, pipe_id; + struct 
amdgpu_ring *ring; + int i; + + DRM_DEBUG("IH: CP EOP\n"); + me_id = (entry->ring_id & 0x0c) >> 2; + pipe_id = (entry->ring_id & 0x03) >> 0; + switch (me_id) { + case 0: + amdgpu_fence_process(&adev->gfx.gfx_ring[0]); + break; + case 1: + case 2: + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + ring = &adev->gfx.compute_ring[i]; + if ((ring->me == me_id) && (ring->pipe == pipe_id)) + amdgpu_fence_process(ring); + } + break; + } + return 0; +} + +static int gfx_v7_0_priv_reg_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + DRM_ERROR("Illegal register access in command stream\n"); + schedule_work(&adev->reset_work); + return 0; +} + +static int gfx_v7_0_priv_inst_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + DRM_ERROR("Illegal instruction in command stream\n"); + /* XXX soft reset the gfx block only */ + schedule_work(&adev->reset_work); + return 0; +} + +static int gfx_v7_0_set_clockgating_state(struct amdgpu_device *adev, + enum amdgpu_clockgating_state state) +{ + bool gate = false; + + if (state == AMDGPU_CG_STATE_GATE) + gate = true; + + gfx_v7_0_enable_gui_idle_interrupt(adev, false); + /* order matters! */ + if (gate) { + gfx_v7_0_enable_mgcg(adev, true); + gfx_v7_0_enable_cgcg(adev, true); + } else { + gfx_v7_0_enable_cgcg(adev, false); + gfx_v7_0_enable_mgcg(adev, false); + } + gfx_v7_0_enable_gui_idle_interrupt(adev, true); + + return 0; +} + +static int gfx_v7_0_set_powergating_state(struct amdgpu_device *adev, + enum amdgpu_powergating_state state) +{ + bool gate = false; + + if (state == AMDGPU_PG_STATE_GATE) + gate = true; + + if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG | + AMDGPU_PG_SUPPORT_GFX_SMG | + AMDGPU_PG_SUPPORT_GFX_DMG | + AMDGPU_PG_SUPPORT_CP | + AMDGPU_PG_SUPPORT_GDS | + AMDGPU_PG_SUPPORT_RLC_SMU_HS)) { + gfx_v7_0_update_gfx_pg(adev, gate); + if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) { + gfx_v7_0_enable_cp_pg(adev, gate); + gfx_v7_0_enable_gds_pg(adev, gate); + } + } + + return 0; +} + +const struct amdgpu_ip_funcs gfx_v7_0_ip_funcs = { + .early_init = gfx_v7_0_early_init, + .late_init = NULL, + .sw_init = gfx_v7_0_sw_init, + .sw_fini = gfx_v7_0_sw_fini, + .hw_init = gfx_v7_0_hw_init, + .hw_fini = gfx_v7_0_hw_fini, + .suspend = gfx_v7_0_suspend, + .resume = gfx_v7_0_resume, + .is_idle = gfx_v7_0_is_idle, + .wait_for_idle = gfx_v7_0_wait_for_idle, + .soft_reset = gfx_v7_0_soft_reset, + .print_status = gfx_v7_0_print_status, + .set_clockgating_state = gfx_v7_0_set_clockgating_state, + .set_powergating_state = gfx_v7_0_set_powergating_state, +}; + +/** + * gfx_v7_0_ring_is_lockup - check if the 3D engine is locked up + * + * @adev: amdgpu_device pointer + * @ring: amdgpu_ring structure holding ring information + * + * Check if the 3D engine is locked up (CIK). + * Returns true if the engine is locked, false if not. 
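+ * Roughly: if GRBM_STATUS reports the GFX block idle, the lockup + * tracker is refreshed via amdgpu_ring_lockup_update() and false is + * returned; otherwise amdgpu_ring_test_lockup() decides whether the + * ring has stopped making progress.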
+ */ +static bool gfx_v7_0_ring_is_lockup(struct amdgpu_ring *ring) +{ + if (gfx_v7_0_is_idle(ring->adev)) { + amdgpu_ring_lockup_update(ring); + return false; + } + return amdgpu_ring_test_lockup(ring); +} + +static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = { + .get_rptr = gfx_v7_0_ring_get_rptr_gfx, + .get_wptr = gfx_v7_0_ring_get_wptr_gfx, + .set_wptr = gfx_v7_0_ring_set_wptr_gfx, + .parse_cs = NULL, + .emit_ib = gfx_v7_0_ring_emit_ib, + .emit_fence = gfx_v7_0_ring_emit_fence_gfx, + .emit_semaphore = gfx_v7_0_ring_emit_semaphore, + .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush, + .emit_gds_switch = gfx_v7_0_ring_emit_gds_switch, + .test_ring = gfx_v7_0_ring_test_ring, + .test_ib = gfx_v7_0_ring_test_ib, + .is_lockup = gfx_v7_0_ring_is_lockup, +}; + +static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = { + .get_rptr = gfx_v7_0_ring_get_rptr_compute, + .get_wptr = gfx_v7_0_ring_get_wptr_compute, + .set_wptr = gfx_v7_0_ring_set_wptr_compute, + .parse_cs = NULL, + .emit_ib = gfx_v7_0_ring_emit_ib, + .emit_fence = gfx_v7_0_ring_emit_fence_compute, + .emit_semaphore = gfx_v7_0_ring_emit_semaphore, + .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush, + .emit_gds_switch = gfx_v7_0_ring_emit_gds_switch, + .test_ring = gfx_v7_0_ring_test_ring, + .test_ib = gfx_v7_0_ring_test_ib, + .is_lockup = gfx_v7_0_ring_is_lockup, +}; + +static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < adev->gfx.num_gfx_rings; i++) + adev->gfx.gfx_ring[i].funcs = &gfx_v7_0_ring_funcs_gfx; + for (i = 0; i < adev->gfx.num_compute_rings; i++) + adev->gfx.compute_ring[i].funcs = &gfx_v7_0_ring_funcs_compute; +} + +static const struct amdgpu_irq_src_funcs gfx_v7_0_eop_irq_funcs = { + .set = gfx_v7_0_set_eop_interrupt_state, + .process = gfx_v7_0_eop_irq, +}; + +static const struct amdgpu_irq_src_funcs gfx_v7_0_priv_reg_irq_funcs = { + .set = gfx_v7_0_set_priv_reg_fault_state, + .process = gfx_v7_0_priv_reg_irq, +}; + +static const struct amdgpu_irq_src_funcs gfx_v7_0_priv_inst_irq_funcs = { + .set = gfx_v7_0_set_priv_inst_fault_state, + .process = gfx_v7_0_priv_inst_irq, +}; + +static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST; + adev->gfx.eop_irq.funcs = &gfx_v7_0_eop_irq_funcs; + + adev->gfx.priv_reg_irq.num_types = 1; + adev->gfx.priv_reg_irq.funcs = &gfx_v7_0_priv_reg_irq_funcs; + + adev->gfx.priv_inst_irq.num_types = 1; + adev->gfx.priv_inst_irq.funcs = &gfx_v7_0_priv_inst_irq_funcs; +} + +static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev) +{ + /* init asci gds info */ + adev->gds.mem.total_size = RREG32(mmGDS_VMID0_SIZE); + adev->gds.gws.total_size = 64; + adev->gds.oa.total_size = 16; + + if (adev->gds.mem.total_size == 64 * 1024) { + adev->gds.mem.gfx_partition_size = 4096; + adev->gds.mem.cs_partition_size = 4096; + + adev->gds.gws.gfx_partition_size = 4; + adev->gds.gws.cs_partition_size = 4; + + adev->gds.oa.gfx_partition_size = 4; + adev->gds.oa.cs_partition_size = 1; + } else { + adev->gds.mem.gfx_partition_size = 1024; + adev->gds.mem.cs_partition_size = 1024; + + adev->gds.gws.gfx_partition_size = 16; + adev->gds.gws.cs_partition_size = 16; + + adev->gds.oa.gfx_partition_size = 4; + adev->gds.oa.cs_partition_size = 4; + } +} + + +int gfx_v7_0_get_cu_info(struct amdgpu_device *adev, + struct amdgpu_cu_info *cu_info) +{ + int i, j, k, counter, active_cu_number = 0; + u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0; + + if (!adev || !cu_info) + return -EINVAL; + + 
mutex_lock(&adev->grbm_idx_mutex); + for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { + for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { + mask = 1; + ao_bitmap = 0; + counter = 0; + bitmap = gfx_v7_0_get_cu_active_bitmap(adev, i, j); + cu_info->bitmap[i][j] = bitmap; + + for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) { + if (bitmap & mask) { + if (counter < 2) + ao_bitmap |= mask; + counter ++; + } + mask <<= 1; + } + active_cu_number += counter; + ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8)); + } + } + + cu_info->number = active_cu_number; + cu_info->ao_cu_mask = ao_cu_mask; + mutex_unlock(&adev->grbm_idx_mutex); + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h new file mode 100644 index 000000000000..668b91a89e1e --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h @@ -0,0 +1,37 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __GFX_V7_0_H__ +#define __GFX_V7_0_H__ + +extern const struct amdgpu_ip_funcs gfx_v7_0_ip_funcs; + +/* XXX these shouldn't be exported */ +void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev); +void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev); +void gfx_v7_0_rlc_stop(struct amdgpu_device *adev); +uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev); +void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num); +int gfx_v7_0_get_cu_info(struct amdgpu_device *adev, struct amdgpu_cu_info *cu_info); + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c new file mode 100644 index 000000000000..b9b1df6bdaa4 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -0,0 +1,1307 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include <linux/firmware.h> +#include "drmP.h" +#include "amdgpu.h" +#include "cikd.h" +#include "cik.h" +#include "gmc_v7_0.h" +#include "amdgpu_ucode.h" + +#include "bif/bif_4_1_d.h" +#include "bif/bif_4_1_sh_mask.h" + +#include "gmc/gmc_7_1_d.h" +#include "gmc/gmc_7_1_sh_mask.h" + +#include "oss/oss_2_0_d.h" +#include "oss/oss_2_0_sh_mask.h" + +static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev); +static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev); + +MODULE_FIRMWARE("radeon/bonaire_mc.bin"); +MODULE_FIRMWARE("radeon/hawaii_mc.bin"); + +/** + * gmc_v7_0_mc_wait_for_idle - wait for MC idle callback. + * + * @adev: amdgpu_device pointer + * + * Wait for the MC (memory controller) to be idle. + * (CIK). + * Returns 0 if the MC is idle, -1 if not. + */ +int gmc_v7_0_mc_wait_for_idle(struct amdgpu_device *adev) +{ + unsigned i; + u32 tmp; + + for (i = 0; i < adev->usec_timeout; i++) { + /* read MC_STATUS */ + tmp = RREG32(mmSRBM_STATUS) & 0x1F00; + if (!tmp) + return 0; + udelay(1); + } + return -1; +} + +void gmc_v7_0_mc_stop(struct amdgpu_device *adev, + struct amdgpu_mode_mc_save *save) +{ + u32 blackout; + + if (adev->mode_info.num_crtc) + amdgpu_display_stop_mc_access(adev, save); + + amdgpu_asic_wait_for_mc_idle(adev); + + blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL); + if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) { + /* Block CPU access */ + WREG32(mmBIF_FB_EN, 0); + /* blackout the MC */ + blackout = REG_SET_FIELD(blackout, + MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0); + WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1); + } + /* wait for the MC to settle */ + udelay(100); +} + +void gmc_v7_0_mc_resume(struct amdgpu_device *adev, + struct amdgpu_mode_mc_save *save) +{ + u32 tmp; + + /* unblackout the MC */ + tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL); + tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0); + WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp); + /* allow CPU access */ + tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1); + tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1); + WREG32(mmBIF_FB_EN, tmp); + + if (adev->mode_info.num_crtc) + amdgpu_display_resume_mc_access(adev, save); +} + +/** + * gmc_v7_0_init_microcode - load ucode images from disk + * + * @adev: amdgpu_device pointer + * + * Use the firmware interface to load the ucode images into + * the driver (not loaded into hw). + * Returns 0 on success, error on failure. 
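+ * The ucode is requested as "radeon/%s_mc.bin" using the chip name, + * e.g. "radeon/bonaire_mc.bin" on Bonaire; Kaveri and Kabini are APUs + * and need no MC firmware, so they return 0 without loading anything.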
+ */ +static int gmc_v7_0_init_microcode(struct amdgpu_device *adev) +{ + const char *chip_name; + char fw_name[30]; + int err; + + DRM_DEBUG("\n"); + + switch (adev->asic_type) { + case CHIP_BONAIRE: + chip_name = "bonaire"; + break; + case CHIP_HAWAII: + chip_name = "hawaii"; + break; + case CHIP_KAVERI: + case CHIP_KABINI: + return 0; + default: BUG(); + } + + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); + err = request_firmware(&adev->mc.fw, fw_name, adev->dev); + if (err) + goto out; + err = amdgpu_ucode_validate(adev->mc.fw); + +out: + if (err) { + printk(KERN_ERR + "cik_mc: Failed to load firmware \"%s\"\n", + fw_name); + release_firmware(adev->mc.fw); + adev->mc.fw = NULL; + } + return err; +} + +/** + * gmc_v7_0_mc_load_microcode - load MC ucode into the hw + * + * @adev: amdgpu_device pointer + * + * Load the GDDR MC ucode into the hw (CIK). + * Returns 0 on success, error on failure. + */ +static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev) +{ + const struct mc_firmware_header_v1_0 *hdr; + const __le32 *fw_data = NULL; + const __le32 *io_mc_regs = NULL; + u32 running, blackout = 0; + int i, ucode_size, regs_size; + + if (!adev->mc.fw) + return -EINVAL; + + hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data; + amdgpu_ucode_print_mc_hdr(&hdr->header); + + adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version); + regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2); + io_mc_regs = (const __le32 *) + (adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes)); + ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; + fw_data = (const __le32 *) + (adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + + running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN); + + if (running == 0) { + if (running) { + blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL); + WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1); + } + + /* reset the engine and set to writable */ + WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008); + WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010); + + /* load mc io regs */ + for (i = 0; i < regs_size; i++) { + WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++)); + WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++)); + } + /* load the MC ucode */ + for (i = 0; i < ucode_size; i++) + WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++)); + + /* put the engine back into the active state */ + WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008); + WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004); + WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001); + + /* wait for training to complete */ + for (i = 0; i < adev->usec_timeout; i++) { + if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL), + MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0)) + break; + udelay(1); + } + for (i = 0; i < adev->usec_timeout; i++) { + if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL), + MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1)) + break; + udelay(1); + } + + if (running) + WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout); + } + + return 0; +} + +static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev, + struct amdgpu_mc *mc) +{ + if (mc->mc_vram_size > 0xFFC0000000ULL) { + /* leave room for at least 1024M GTT */ + dev_warn(adev->dev, "limiting VRAM\n"); + mc->real_vram_size = 0xFFC0000000ULL; + mc->mc_vram_size = 0xFFC0000000ULL; + } + amdgpu_vram_location(adev, &adev->mc, 0); + adev->mc.gtt_base_align = 0; + amdgpu_gtt_location(adev, mc); +} + +/** + * gmc_v7_0_mc_program - program the GPU memory controller + * + * @adev: amdgpu_device pointer + * + 
* Set the location of vram, gart, and AGP in the GPU's + * physical address space (CIK). + */ +static void gmc_v7_0_mc_program(struct amdgpu_device *adev) +{ + struct amdgpu_mode_mc_save save; + u32 tmp; + int i, j; + + /* Initialize HDP */ + for (i = 0, j = 0; i < 32; i++, j += 0x6) { + WREG32((0xb05 + j), 0x00000000); + WREG32((0xb06 + j), 0x00000000); + WREG32((0xb07 + j), 0x00000000); + WREG32((0xb08 + j), 0x00000000); + WREG32((0xb09 + j), 0x00000000); + } + WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0); + + if (adev->mode_info.num_crtc) + amdgpu_display_set_vga_render_state(adev, false); + + gmc_v7_0_mc_stop(adev, &save); + if (amdgpu_asic_wait_for_mc_idle(adev)) { + dev_warn(adev->dev, "Wait for MC idle timedout !\n"); + } + /* Update configuration */ + WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, + adev->mc.vram_start >> 12); + WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, + adev->mc.vram_end >> 12); + WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, + adev->vram_scratch.gpu_addr >> 12); + tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16; + tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF); + WREG32(mmMC_VM_FB_LOCATION, tmp); + /* XXX double check these! */ + WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8)); + WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30)); + WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF); + WREG32(mmMC_VM_AGP_BASE, 0); + WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF); + WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF); + if (amdgpu_asic_wait_for_mc_idle(adev)) { + dev_warn(adev->dev, "Wait for MC idle timedout !\n"); + } + gmc_v7_0_mc_resume(adev, &save); + + WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK); + + tmp = RREG32(mmHDP_MISC_CNTL); + tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1); + WREG32(mmHDP_MISC_CNTL, tmp); + + tmp = RREG32(mmHDP_HOST_PATH_CNTL); + WREG32(mmHDP_HOST_PATH_CNTL, tmp); +} + +/** + * gmc_v7_0_mc_init - initialize the memory controller driver params + * + * @adev: amdgpu_device pointer + * + * Look up the amount of vram, vram width, and decide how to place + * vram and gart within the GPU's physical address space (CIK). + * Returns 0 for success. + */ +static int gmc_v7_0_mc_init(struct amdgpu_device *adev) +{ + u32 tmp; + int chansize, numchan; + + /* Get VRAM informations */ + tmp = RREG32(mmMC_ARB_RAMCFG); + if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) { + chansize = 64; + } else { + chansize = 32; + } + tmp = RREG32(mmMC_SHARED_CHMAP); + switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) { + case 0: + default: + numchan = 1; + break; + case 1: + numchan = 2; + break; + case 2: + numchan = 4; + break; + case 3: + numchan = 8; + break; + case 4: + numchan = 3; + break; + case 5: + numchan = 6; + break; + case 6: + numchan = 10; + break; + case 7: + numchan = 12; + break; + case 8: + numchan = 16; + break; + } + adev->mc.vram_width = numchan * chansize; + /* Could aper size report 0 ? */ + adev->mc.aper_base = pci_resource_start(adev->pdev, 0); + adev->mc.aper_size = pci_resource_len(adev->pdev, 0); + /* size in MB on si */ + adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; + adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; + adev->mc.visible_vram_size = adev->mc.aper_size; + + /* unless the user had overridden it, set the gart + * size equal to the 1024 or vram, whichever is larger. 
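+ * (i.e. gtt_size = max(1024MB, mc_vram_size) when amdgpu_gart_size is + * left at -1, otherwise the user-supplied size in MB is used as-is).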
+ */ + if (amdgpu_gart_size == -1) + adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size); + else + adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20; + + gmc_v7_0_vram_gtt_location(adev, &adev->mc); + + return 0; +} + +/* + * GART + * VMID 0 is the physical GPU addresses as used by the kernel. + * VMIDs 1-15 are used for userspace clients and are handled + * by the amdgpu vm/hsa code. + */ + +/** + * gmc_v7_0_gart_flush_gpu_tlb - gart tlb flush callback + * + * @adev: amdgpu_device pointer + * @vmid: vm instance to flush + * + * Flush the TLB for the requested page table (CIK). + */ +static void gmc_v7_0_gart_flush_gpu_tlb(struct amdgpu_device *adev, + uint32_t vmid) +{ + /* flush hdp cache */ + WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0); + + /* bits 0-15 are the VM contexts0-15 */ + WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid); +} + +/** + * gmc_v7_0_gart_set_pte_pde - update the page tables using MMIO + * + * @adev: amdgpu_device pointer + * @cpu_pt_addr: cpu address of the page table + * @gpu_page_idx: entry in the page table to update + * @addr: dst addr to write into pte/pde + * @flags: access flags + * + * Update the page tables using the CPU. + */ +static int gmc_v7_0_gart_set_pte_pde(struct amdgpu_device *adev, + void *cpu_pt_addr, + uint32_t gpu_page_idx, + uint64_t addr, + uint32_t flags) +{ + void __iomem *ptr = (void *)cpu_pt_addr; + uint64_t value; + + value = addr & 0xFFFFFFFFFFFFF000ULL; + value |= flags; + writeq(value, ptr + (gpu_page_idx * 8)); + + return 0; +} + +/** + * gmc_v7_0_gart_enable - gart enable + * + * @adev: amdgpu_device pointer + * + * This sets up the TLBs, programs the page tables for VMID0, + * sets up the hw for VMIDs 1-15 which are allocated on + * demand, and sets up the global locations for the LDS, GDS, + * and GPUVM for FSA64 clients (CIK). + * Returns 0 for success, errors for failure. 
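+ * Rough order of operations below: pin the GART table in VRAM, program + * the L1 TLB and the VM L2 cache, point context0 at the GTT range for + * kernel mappings, enable contexts 1-15 with protection faults directed + * at the dummy page, then flush the TLB.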
+ */ +static int gmc_v7_0_gart_enable(struct amdgpu_device *adev) +{ + int r, i; + u32 tmp; + + if (adev->gart.robj == NULL) { + dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); + return -EINVAL; + } + r = amdgpu_gart_table_vram_pin(adev); + if (r) + return r; + /* Setup TLB control */ + tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); + WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp); + /* Setup L2 cache */ + tmp = RREG32(mmVM_L2_CNTL); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1); + WREG32(mmVM_L2_CNTL, tmp); + tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); + WREG32(mmVM_L2_CNTL2, tmp); + tmp = RREG32(mmVM_L2_CNTL3); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 4); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4); + WREG32(mmVM_L2_CNTL3, tmp); + /* setup context0 */ + WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12); + WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12); + WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12); + WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, + (u32)(adev->dummy_page.addr >> 12)); + WREG32(mmVM_CONTEXT0_CNTL2, 0); + tmp = RREG32(mmVM_CONTEXT0_CNTL); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + WREG32(mmVM_CONTEXT0_CNTL, tmp); + + WREG32(0x575, 0); + WREG32(0x576, 0); + WREG32(0x577, 0); + + /* empty context1-15 */ + /* FIXME start with 4G, once using 2 level pt switch to full + * vm size space + */ + /* set vm size, must be a multiple of 4 */ + WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0); + WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn); + for (i = 1; i < 16; i++) { + if (i < 8) + WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i, + adev->gart.table_addr >> 12); + else + WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8, + adev->gart.table_addr >> 12); + } + + /* enable context1-15 */ + WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR, + (u32)(adev->dummy_page.addr >> 12)); + WREG32(mmVM_CONTEXT1_CNTL2, 4); + tmp = RREG32(mmVM_CONTEXT1_CNTL); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1); + tmp = 
REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_INTERRUPT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_INTERRUPT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE, + amdgpu_vm_block_size - 9); + WREG32(mmVM_CONTEXT1_CNTL, tmp); + + if (adev->asic_type == CHIP_KAVERI) { + tmp = RREG32(mmCHUB_CONTROL); + tmp &= ~BYPASS_VM; + WREG32(mmCHUB_CONTROL, tmp); + } + + gmc_v7_0_gart_flush_gpu_tlb(adev, 0); + DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", + (unsigned)(adev->mc.gtt_size >> 20), + (unsigned long long)adev->gart.table_addr); + adev->gart.ready = true; + return 0; +} + +static int gmc_v7_0_gart_init(struct amdgpu_device *adev) +{ + int r; + + if (adev->gart.robj) { + WARN(1, "R600 PCIE GART already initialized\n"); + return 0; + } + /* Initialize common gart structure */ + r = amdgpu_gart_init(adev); + if (r) + return r; + adev->gart.table_size = adev->gart.num_gpu_pages * 8; + return amdgpu_gart_table_vram_alloc(adev); +} + +/** + * gmc_v7_0_gart_disable - gart disable + * + * @adev: amdgpu_device pointer + * + * This disables all VM page table (CIK). + */ +static void gmc_v7_0_gart_disable(struct amdgpu_device *adev) +{ + u32 tmp; + + /* Disable all tables */ + WREG32(mmVM_CONTEXT0_CNTL, 0); + WREG32(mmVM_CONTEXT1_CNTL, 0); + /* Setup TLB control */ + tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0); + WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp); + /* Setup L2 cache */ + tmp = RREG32(mmVM_L2_CNTL); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0); + WREG32(mmVM_L2_CNTL, tmp); + WREG32(mmVM_L2_CNTL2, 0); + amdgpu_gart_table_vram_unpin(adev); +} + +/** + * gmc_v7_0_gart_fini - vm fini callback + * + * @adev: amdgpu_device pointer + * + * Tears down the driver GART/VM setup (CIK). + */ +static void gmc_v7_0_gart_fini(struct amdgpu_device *adev) +{ + amdgpu_gart_table_vram_free(adev); + amdgpu_gart_fini(adev); +} + +/* + * vm + * VMID 0 is the physical GPU addresses as used by the kernel. + * VMIDs 1-15 are used for userspace clients and are handled + * by the amdgpu vm/hsa code. + */ +/** + * gmc_v7_0_vm_init - cik vm init callback + * + * @adev: amdgpu_device pointer + * + * Inits cik specific vm parameters (number of VMs, base of vram for + * VMIDs 1-15) (CIK). + * Returns 0 for success. 
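+ * Note that on APUs the VRAM base offset is read from MC_VM_FB_OFFSET + * and shifted left by 22 bits; on dGPUs it is simply 0.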
+ */ +static int gmc_v7_0_vm_init(struct amdgpu_device *adev) +{ + /* + * number of VMs + * VMID 0 is reserved for System + * amdgpu graphics/compute will use VMIDs 1-7 + * amdkfd will use VMIDs 8-15 + */ + adev->vm_manager.nvm = AMDGPU_NUM_OF_VMIDS; + + /* base offset of vram pages */ + if (adev->flags & AMDGPU_IS_APU) { + u64 tmp = RREG32(mmMC_VM_FB_OFFSET); + tmp <<= 22; + adev->vm_manager.vram_base_offset = tmp; + } else + adev->vm_manager.vram_base_offset = 0; + + return 0; +} + +/** + * gmc_v7_0_vm_fini - cik vm fini callback + * + * @adev: amdgpu_device pointer + * + * Tear down any asic specific VM setup (CIK). + */ +static void gmc_v7_0_vm_fini(struct amdgpu_device *adev) +{ +} + +/** + * gmc_v7_0_vm_decode_fault - print human readable fault info + * + * @adev: amdgpu_device pointer + * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value + * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value + * + * Print human readable fault information (CIK). + */ +static void gmc_v7_0_vm_decode_fault(struct amdgpu_device *adev, + u32 status, u32 addr, u32 mc_client) +{ + u32 mc_id; + u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID); + u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, + PROTECTIONS); + char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff, + (mc_client >> 8) & 0xff, mc_client & 0xff, 0 }; + + mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, + MEMORY_CLIENT_ID); + + printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n", + protections, vmid, addr, + REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, + MEMORY_CLIENT_RW) ? + "write" : "read", block, mc_client, mc_id); +} + + +static const u32 mc_cg_registers[] = { + mmMC_HUB_MISC_HUB_CG, + mmMC_HUB_MISC_SIP_CG, + mmMC_HUB_MISC_VM_CG, + mmMC_XPB_CLK_GAT, + mmATC_MISC_CG, + mmMC_CITF_MISC_WR_CG, + mmMC_CITF_MISC_RD_CG, + mmMC_CITF_MISC_VM_CG, + mmVM_L2_CG, +}; + +static const u32 mc_cg_ls_en[] = { + MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK, + MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK, + MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK, + MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK, + ATC_MISC_CG__MEM_LS_ENABLE_MASK, + MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK, + MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK, + MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK, + VM_L2_CG__MEM_LS_ENABLE_MASK, +}; + +static const u32 mc_cg_en[] = { + MC_HUB_MISC_HUB_CG__ENABLE_MASK, + MC_HUB_MISC_SIP_CG__ENABLE_MASK, + MC_HUB_MISC_VM_CG__ENABLE_MASK, + MC_XPB_CLK_GAT__ENABLE_MASK, + ATC_MISC_CG__ENABLE_MASK, + MC_CITF_MISC_WR_CG__ENABLE_MASK, + MC_CITF_MISC_RD_CG__ENABLE_MASK, + MC_CITF_MISC_VM_CG__ENABLE_MASK, + VM_L2_CG__ENABLE_MASK, +}; + +static void gmc_v7_0_enable_mc_ls(struct amdgpu_device *adev, + bool enable) +{ + int i; + u32 orig, data; + + for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) { + orig = data = RREG32(mc_cg_registers[i]); + if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS)) + data |= mc_cg_ls_en[i]; + else + data &= ~mc_cg_ls_en[i]; + if (data != orig) + WREG32(mc_cg_registers[i], data); + } +} + +static void gmc_v7_0_enable_mc_mgcg(struct amdgpu_device *adev, + bool enable) +{ + int i; + u32 orig, data; + + for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) { + orig = data = RREG32(mc_cg_registers[i]); + if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG)) + data |= mc_cg_en[i]; + else + data &= ~mc_cg_en[i]; + if (data != orig) + WREG32(mc_cg_registers[i], data); + } +} + +static void gmc_v7_0_enable_bif_mgls(struct amdgpu_device *adev, + 
bool enable) +{ + u32 orig, data; + + orig = data = RREG32_PCIE(ixPCIE_CNTL2); + + if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) { + data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1); + data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1); + data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1); + data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 1); + } else { + data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0); + data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0); + data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0); + data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0); + } + + if (orig != data) + WREG32_PCIE(ixPCIE_CNTL2, data); +} + +static void gmc_v7_0_enable_hdp_mgcg(struct amdgpu_device *adev, + bool enable) +{ + u32 orig, data; + + orig = data = RREG32(mmHDP_HOST_PATH_CNTL); + + if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG)) + data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0); + else + data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1); + + if (orig != data) + WREG32(mmHDP_HOST_PATH_CNTL, data); +} + +static void gmc_v7_0_enable_hdp_ls(struct amdgpu_device *adev, + bool enable) +{ + u32 orig, data; + + orig = data = RREG32(mmHDP_MEM_POWER_LS); + + if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS)) + data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1); + else + data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0); + + if (orig != data) + WREG32(mmHDP_MEM_POWER_LS, data); +} + +static int gmc_v7_0_early_init(struct amdgpu_device *adev) +{ + gmc_v7_0_set_gart_funcs(adev); + gmc_v7_0_set_irq_funcs(adev); + + if (adev->flags & AMDGPU_IS_APU) { + adev->mc.is_gddr5 = false; + } else { + u32 tmp = RREG32(mmMC_SEQ_MISC0); + + if (((tmp & MC_SEQ_MISC0__GDDR5_MASK) >> + MC_SEQ_MISC0__GDDR5__SHIFT) == MC_SEQ_MISC0__GDDR5_VALUE) + adev->mc.is_gddr5 = true; + else + adev->mc.is_gddr5 = false; + } + + return 0; +} + +static int gmc_v7_0_sw_init(struct amdgpu_device *adev) +{ + int r; + int dma_bits; + + r = amdgpu_gem_init(adev); + if (r) + return r; + + r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault); + if (r) + return r; + + r = amdgpu_irq_add_id(adev, 147, &adev->mc.vm_fault); + if (r) + return r; + + /* Adjust VM size here. + * Currently set to 4GB ((1 << 20) 4k pages). + * Max GPUVM size for cayman and SI is 40 bits. + */ + adev->vm_manager.max_pfn = amdgpu_vm_size << 18; + + /* Set the internal MC address mask + * This is the max address of the GPU's + * internal address space. + */ + adev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */ + + /* set DMA mask + need_dma32 flags. + * PCIE - can handle 40-bits. + * IGP - can handle 40-bits + * PCI - dma32 for legacy pci gart, 40 bits on newer asics + */ + adev->need_dma32 = false; + dma_bits = adev->need_dma32 ? 
32 : 40; + r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits)); + if (r) { + adev->need_dma32 = true; + dma_bits = 32; + printk(KERN_WARNING "amdgpu: No suitable DMA available.\n"); + } + r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits)); + if (r) { + pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32)); + printk(KERN_WARNING "amdgpu: No coherent DMA available.\n"); + } + + r = gmc_v7_0_init_microcode(adev); + if (r) { + DRM_ERROR("Failed to load mc firmware!\n"); + return r; + } + + r = gmc_v7_0_mc_init(adev); + if (r) + return r; + + /* Memory manager */ + r = amdgpu_bo_init(adev); + if (r) + return r; + + r = gmc_v7_0_gart_init(adev); + if (r) + return r; + + if (!adev->vm_manager.enabled) { + r = gmc_v7_0_vm_init(adev); + if (r) { + dev_err(adev->dev, "vm manager initialization failed (%d).\n", r); + return r; + } + adev->vm_manager.enabled = true; + } + + return r; +} + +static int gmc_v7_0_sw_fini(struct amdgpu_device *adev) +{ + int i; + + if (adev->vm_manager.enabled) { + for (i = 0; i < AMDGPU_NUM_VM; ++i) + amdgpu_fence_unref(&adev->vm_manager.active[i]); + gmc_v7_0_vm_fini(adev); + adev->vm_manager.enabled = false; + } + gmc_v7_0_gart_fini(adev); + amdgpu_gem_fini(adev); + amdgpu_bo_fini(adev); + + return 0; +} + +static int gmc_v7_0_hw_init(struct amdgpu_device *adev) +{ + int r; + + gmc_v7_0_mc_program(adev); + + if (!(adev->flags & AMDGPU_IS_APU)) { + r = gmc_v7_0_mc_load_microcode(adev); + if (r) { + DRM_ERROR("Failed to load MC firmware!\n"); + return r; + } + } + + r = gmc_v7_0_gart_enable(adev); + if (r) + return r; + + return r; +} + +static int gmc_v7_0_hw_fini(struct amdgpu_device *adev) +{ + gmc_v7_0_gart_disable(adev); + + return 0; +} + +static int gmc_v7_0_suspend(struct amdgpu_device *adev) +{ + int i; + + if (adev->vm_manager.enabled) { + for (i = 0; i < AMDGPU_NUM_VM; ++i) + amdgpu_fence_unref(&adev->vm_manager.active[i]); + gmc_v7_0_vm_fini(adev); + adev->vm_manager.enabled = false; + } + gmc_v7_0_hw_fini(adev); + + return 0; +} + +static int gmc_v7_0_resume(struct amdgpu_device *adev) +{ + int r; + + r = gmc_v7_0_hw_init(adev); + if (r) + return r; + + if (!adev->vm_manager.enabled) { + r = gmc_v7_0_vm_init(adev); + if (r) { + dev_err(adev->dev, "vm manager initialization failed (%d).\n", r); + return r; + } + adev->vm_manager.enabled = true; + } + + return r; +} + +static bool gmc_v7_0_is_idle(struct amdgpu_device *adev) +{ + u32 tmp = RREG32(mmSRBM_STATUS); + + if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | + SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK)) + return false; + + return true; +} + +static int gmc_v7_0_wait_for_idle(struct amdgpu_device *adev) +{ + unsigned i; + u32 tmp; + + for (i = 0; i < adev->usec_timeout; i++) { + /* read MC_STATUS */ + tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK | + SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | + SRBM_STATUS__MCC_BUSY_MASK | + SRBM_STATUS__MCD_BUSY_MASK | + SRBM_STATUS__VMC_BUSY_MASK); + if (!tmp) + return 0; + udelay(1); + } + return -ETIMEDOUT; + +} + +static void gmc_v7_0_print_status(struct amdgpu_device *adev) +{ + int i, j; + + dev_info(adev->dev, "GMC 8.x registers\n"); + dev_info(adev->dev, " SRBM_STATUS=0x%08X\n", + RREG32(mmSRBM_STATUS)); + dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", + RREG32(mmSRBM_STATUS2)); + + dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", + RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR)); + dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 
0x%08X\n", + RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS)); + dev_info(adev->dev, " MC_VM_MX_L1_TLB_CNTL=0x%08X\n", + RREG32(mmMC_VM_MX_L1_TLB_CNTL)); + dev_info(adev->dev, " VM_L2_CNTL=0x%08X\n", + RREG32(mmVM_L2_CNTL)); + dev_info(adev->dev, " VM_L2_CNTL2=0x%08X\n", + RREG32(mmVM_L2_CNTL2)); + dev_info(adev->dev, " VM_L2_CNTL3=0x%08X\n", + RREG32(mmVM_L2_CNTL3)); + dev_info(adev->dev, " VM_CONTEXT0_PAGE_TABLE_START_ADDR=0x%08X\n", + RREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR)); + dev_info(adev->dev, " VM_CONTEXT0_PAGE_TABLE_END_ADDR=0x%08X\n", + RREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR)); + dev_info(adev->dev, " VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n", + RREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR)); + dev_info(adev->dev, " VM_CONTEXT0_CNTL2=0x%08X\n", + RREG32(mmVM_CONTEXT0_CNTL2)); + dev_info(adev->dev, " VM_CONTEXT0_CNTL=0x%08X\n", + RREG32(mmVM_CONTEXT0_CNTL)); + dev_info(adev->dev, " 0x15D4=0x%08X\n", + RREG32(0x575)); + dev_info(adev->dev, " 0x15D8=0x%08X\n", + RREG32(0x576)); + dev_info(adev->dev, " 0x15DC=0x%08X\n", + RREG32(0x577)); + dev_info(adev->dev, " VM_CONTEXT1_PAGE_TABLE_START_ADDR=0x%08X\n", + RREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR)); + dev_info(adev->dev, " VM_CONTEXT1_PAGE_TABLE_END_ADDR=0x%08X\n", + RREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR)); + dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n", + RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR)); + dev_info(adev->dev, " VM_CONTEXT1_CNTL2=0x%08X\n", + RREG32(mmVM_CONTEXT1_CNTL2)); + dev_info(adev->dev, " VM_CONTEXT1_CNTL=0x%08X\n", + RREG32(mmVM_CONTEXT1_CNTL)); + for (i = 0; i < 16; i++) { + if (i < 8) + dev_info(adev->dev, " VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n", + i, RREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i)); + else + dev_info(adev->dev, " VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n", + i, RREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8)); + } + dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_LOW_ADDR=0x%08X\n", + RREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR)); + dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_HIGH_ADDR=0x%08X\n", + RREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR)); + dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR=0x%08X\n", + RREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR)); + dev_info(adev->dev, " MC_VM_FB_LOCATION=0x%08X\n", + RREG32(mmMC_VM_FB_LOCATION)); + dev_info(adev->dev, " MC_VM_AGP_BASE=0x%08X\n", + RREG32(mmMC_VM_AGP_BASE)); + dev_info(adev->dev, " MC_VM_AGP_TOP=0x%08X\n", + RREG32(mmMC_VM_AGP_TOP)); + dev_info(adev->dev, " MC_VM_AGP_BOT=0x%08X\n", + RREG32(mmMC_VM_AGP_BOT)); + + if (adev->asic_type == CHIP_KAVERI) { + dev_info(adev->dev, " CHUB_CONTROL=0x%08X\n", + RREG32(mmCHUB_CONTROL)); + } + + dev_info(adev->dev, " HDP_REG_COHERENCY_FLUSH_CNTL=0x%08X\n", + RREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL)); + dev_info(adev->dev, " HDP_NONSURFACE_BASE=0x%08X\n", + RREG32(mmHDP_NONSURFACE_BASE)); + dev_info(adev->dev, " HDP_NONSURFACE_INFO=0x%08X\n", + RREG32(mmHDP_NONSURFACE_INFO)); + dev_info(adev->dev, " HDP_NONSURFACE_SIZE=0x%08X\n", + RREG32(mmHDP_NONSURFACE_SIZE)); + dev_info(adev->dev, " HDP_MISC_CNTL=0x%08X\n", + RREG32(mmHDP_MISC_CNTL)); + dev_info(adev->dev, " HDP_HOST_PATH_CNTL=0x%08X\n", + RREG32(mmHDP_HOST_PATH_CNTL)); + + for (i = 0, j = 0; i < 32; i++, j += 0x6) { + dev_info(adev->dev, " %d:\n", i); + dev_info(adev->dev, " 0x%04X=0x%08X\n", + 0xb05 + j, RREG32(0xb05 + j)); + dev_info(adev->dev, " 0x%04X=0x%08X\n", + 0xb06 + j, RREG32(0xb06 + j)); + dev_info(adev->dev, " 0x%04X=0x%08X\n", + 0xb07 + j, RREG32(0xb07 + j)); + 
dev_info(adev->dev, " 0x%04X=0x%08X\n", + 0xb08 + j, RREG32(0xb08 + j)); + dev_info(adev->dev, " 0x%04X=0x%08X\n", + 0xb09 + j, RREG32(0xb09 + j)); + } + + dev_info(adev->dev, " BIF_FB_EN=0x%08X\n", + RREG32(mmBIF_FB_EN)); +} + +static int gmc_v7_0_soft_reset(struct amdgpu_device *adev) +{ + struct amdgpu_mode_mc_save save; + u32 srbm_soft_reset = 0; + u32 tmp = RREG32(mmSRBM_STATUS); + + if (tmp & SRBM_STATUS__VMC_BUSY_MASK) + srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, + SRBM_SOFT_RESET, SOFT_RESET_VMC, 1); + + if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | + SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) { + if (!(adev->flags & AMDGPU_IS_APU)) + srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, + SRBM_SOFT_RESET, SOFT_RESET_MC, 1); + } + + if (srbm_soft_reset) { + gmc_v7_0_print_status(adev); + + gmc_v7_0_mc_stop(adev, &save); + if (gmc_v7_0_wait_for_idle(adev)) { + dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); + } + + + tmp = RREG32(mmSRBM_SOFT_RESET); + tmp |= srbm_soft_reset; + dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + udelay(50); + + tmp &= ~srbm_soft_reset; + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + /* Wait a little for things to settle down */ + udelay(50); + + gmc_v7_0_mc_resume(adev, &save); + udelay(50); + + gmc_v7_0_print_status(adev); + } + + return 0; +} + +static int gmc_v7_0_vm_fault_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned type, + enum amdgpu_interrupt_state state) +{ + u32 tmp; + u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK); + + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + /* system context */ + tmp = RREG32(mmVM_CONTEXT0_CNTL); + tmp &= ~bits; + WREG32(mmVM_CONTEXT0_CNTL, tmp); + /* VMs */ + tmp = RREG32(mmVM_CONTEXT1_CNTL); + tmp &= ~bits; + WREG32(mmVM_CONTEXT1_CNTL, tmp); + break; + case AMDGPU_IRQ_STATE_ENABLE: + /* system context */ + tmp = RREG32(mmVM_CONTEXT0_CNTL); + tmp |= bits; + WREG32(mmVM_CONTEXT0_CNTL, tmp); + /* VMs */ + tmp = RREG32(mmVM_CONTEXT1_CNTL); + tmp |= bits; + WREG32(mmVM_CONTEXT1_CNTL, tmp); + break; + default: + break; + } + + return 0; +} + +static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + u32 addr, status, mc_client; + + addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR); + status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS); + mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); + dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n", + entry->src_id, entry->src_data); + dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", + addr); + dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", + status); + gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client); + /* reset addr and status */ + WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); + + return 0; +} + +static int gmc_v7_0_set_clockgating_state(struct amdgpu_device *adev, + enum amdgpu_clockgating_state state) +{ + bool gate = false; + + if (state == AMDGPU_CG_STATE_GATE) + gate = true; + + if 
(!(adev->flags & AMDGPU_IS_APU)) { + gmc_v7_0_enable_mc_mgcg(adev, gate); + gmc_v7_0_enable_mc_ls(adev, gate); + } + gmc_v7_0_enable_bif_mgls(adev, gate); + gmc_v7_0_enable_hdp_mgcg(adev, gate); + gmc_v7_0_enable_hdp_ls(adev, gate); + + return 0; +} + +static int gmc_v7_0_set_powergating_state(struct amdgpu_device *adev, + enum amdgpu_powergating_state state) +{ + return 0; +} + +const struct amdgpu_ip_funcs gmc_v7_0_ip_funcs = { + .early_init = gmc_v7_0_early_init, + .late_init = NULL, + .sw_init = gmc_v7_0_sw_init, + .sw_fini = gmc_v7_0_sw_fini, + .hw_init = gmc_v7_0_hw_init, + .hw_fini = gmc_v7_0_hw_fini, + .suspend = gmc_v7_0_suspend, + .resume = gmc_v7_0_resume, + .is_idle = gmc_v7_0_is_idle, + .wait_for_idle = gmc_v7_0_wait_for_idle, + .soft_reset = gmc_v7_0_soft_reset, + .print_status = gmc_v7_0_print_status, + .set_clockgating_state = gmc_v7_0_set_clockgating_state, + .set_powergating_state = gmc_v7_0_set_powergating_state, +}; + +static const struct amdgpu_gart_funcs gmc_v7_0_gart_funcs = { + .flush_gpu_tlb = gmc_v7_0_gart_flush_gpu_tlb, + .set_pte_pde = gmc_v7_0_gart_set_pte_pde, +}; + +static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = { + .set = gmc_v7_0_vm_fault_interrupt_state, + .process = gmc_v7_0_process_interrupt, +}; + +static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev) +{ + if (adev->gart.gart_funcs == NULL) + adev->gart.gart_funcs = &gmc_v7_0_gart_funcs; +} + +static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->mc.vm_fault.num_types = 1; + adev->mc.vm_fault.funcs = &gmc_v7_0_irq_funcs; +} diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h new file mode 100644 index 000000000000..ab1a2fa1afcd --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h @@ -0,0 +1,36 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __GMC_V7_0_H__ +#define __GMC_V7_0_H__ + +extern const struct amdgpu_ip_funcs gmc_v7_0_ip_funcs; + +/* XXX these shouldn't be exported */ +void gmc_v7_0_mc_stop(struct amdgpu_device *adev, + struct amdgpu_mode_mc_save *save); +void gmc_v7_0_mc_resume(struct amdgpu_device *adev, + struct amdgpu_mode_mc_save *save); +int gmc_v7_0_mc_wait_for_idle(struct amdgpu_device *adev); + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c new file mode 100644 index 000000000000..cd902419e6a1 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c @@ -0,0 +1,3336 @@ +/* + * Copyright 2013 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "drmP.h" +#include "amdgpu.h" +#include "amdgpu_pm.h" +#include "cikd.h" +#include "atom.h" +#include "amdgpu_atombios.h" +#include "amdgpu_dpm.h" +#include "kv_dpm.h" +#include "gfx_v7_0.h" +#include + +#include "smu/smu_7_0_0_d.h" +#include "smu/smu_7_0_0_sh_mask.h" + +#include "gca/gfx_7_2_d.h" +#include "gca/gfx_7_2_sh_mask.h" + +#define KV_MAX_DEEPSLEEP_DIVIDER_ID 5 +#define KV_MINIMUM_ENGINE_CLOCK 800 +#define SMC_RAM_END 0x40000 + +static void kv_dpm_set_dpm_funcs(struct amdgpu_device *adev); +static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev); +static int kv_enable_nb_dpm(struct amdgpu_device *adev, + bool enable); +static void kv_init_graphics_levels(struct amdgpu_device *adev); +static int kv_calculate_ds_divider(struct amdgpu_device *adev); +static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev); +static int kv_calculate_dpm_settings(struct amdgpu_device *adev); +static void kv_enable_new_levels(struct amdgpu_device *adev); +static void kv_program_nbps_index_settings(struct amdgpu_device *adev, + struct amdgpu_ps *new_rps); +static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level); +static int kv_set_enabled_levels(struct amdgpu_device *adev); +static int kv_force_dpm_highest(struct amdgpu_device *adev); +static int kv_force_dpm_lowest(struct amdgpu_device *adev); +static void kv_apply_state_adjust_rules(struct amdgpu_device *adev, + struct amdgpu_ps *new_rps, + struct amdgpu_ps *old_rps); +static int kv_set_thermal_temperature_range(struct amdgpu_device *adev, + int min_temp, int max_temp); +static int kv_init_fps_limits(struct amdgpu_device *adev); + +static void kv_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate); +static void kv_dpm_powergate_vce(struct amdgpu_device 
*adev, bool gate); +static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate); +static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate); + + +static u32 kv_convert_vid2_to_vid7(struct amdgpu_device *adev, + struct sumo_vid_mapping_table *vid_mapping_table, + u32 vid_2bit) +{ + struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table = + &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; + u32 i; + + if (vddc_sclk_table && vddc_sclk_table->count) { + if (vid_2bit < vddc_sclk_table->count) + return vddc_sclk_table->entries[vid_2bit].v; + else + return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v; + } else { + for (i = 0; i < vid_mapping_table->num_entries; i++) { + if (vid_mapping_table->entries[i].vid_2bit == vid_2bit) + return vid_mapping_table->entries[i].vid_7bit; + } + return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit; + } +} + +static u32 kv_convert_vid7_to_vid2(struct amdgpu_device *adev, + struct sumo_vid_mapping_table *vid_mapping_table, + u32 vid_7bit) +{ + struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table = + &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; + u32 i; + + if (vddc_sclk_table && vddc_sclk_table->count) { + for (i = 0; i < vddc_sclk_table->count; i++) { + if (vddc_sclk_table->entries[i].v == vid_7bit) + return i; + } + return vddc_sclk_table->count - 1; + } else { + for (i = 0; i < vid_mapping_table->num_entries; i++) { + if (vid_mapping_table->entries[i].vid_7bit == vid_7bit) + return vid_mapping_table->entries[i].vid_2bit; + } + + return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit; + } +} + +static void sumo_take_smu_control(struct amdgpu_device *adev, bool enable) +{ +/* This bit selects who handles display phy powergating. + * Clear the bit to let atom handle it. + * Set it to let the driver handle it. + * For now we just let atom handle it. 
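+ * The disabled block below shows what driver-side control would look
+ * like: bit 2 (0x4) of mmDOUT_SCRATCH3 is set to hand the decision to
+ * the driver and cleared to leave it with atom.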
+ */ +#if 0 + u32 v = RREG32(mmDOUT_SCRATCH3); + + if (enable) + v |= 0x4; + else + v &= 0xFFFFFFFB; + + WREG32(mmDOUT_SCRATCH3, v); +#endif +} + +static u32 sumo_get_sleep_divider_from_id(u32 id) +{ + return 1 << id; +} + +static void sumo_construct_sclk_voltage_mapping_table(struct amdgpu_device *adev, + struct sumo_sclk_voltage_mapping_table *sclk_voltage_mapping_table, + ATOM_AVAILABLE_SCLK_LIST *table) +{ + u32 i; + u32 n = 0; + u32 prev_sclk = 0; + + for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) { + if (table[i].ulSupportedSCLK > prev_sclk) { + sclk_voltage_mapping_table->entries[n].sclk_frequency = + table[i].ulSupportedSCLK; + sclk_voltage_mapping_table->entries[n].vid_2bit = + table[i].usVoltageIndex; + prev_sclk = table[i].ulSupportedSCLK; + n++; + } + } + + sclk_voltage_mapping_table->num_max_dpm_entries = n; +} + +static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev, + struct sumo_vid_mapping_table *vid_mapping_table, + ATOM_AVAILABLE_SCLK_LIST *table) +{ + u32 i, j; + + for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) { + if (table[i].ulSupportedSCLK != 0) { + vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit = + table[i].usVoltageID; + vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit = + table[i].usVoltageIndex; + } + } + + for (i = 0; i < SUMO_MAX_NUMBER_VOLTAGES; i++) { + if (vid_mapping_table->entries[i].vid_7bit == 0) { + for (j = i + 1; j < SUMO_MAX_NUMBER_VOLTAGES; j++) { + if (vid_mapping_table->entries[j].vid_7bit != 0) { + vid_mapping_table->entries[i] = + vid_mapping_table->entries[j]; + vid_mapping_table->entries[j].vid_7bit = 0; + break; + } + } + + if (j == SUMO_MAX_NUMBER_VOLTAGES) + break; + } + } + + vid_mapping_table->num_entries = i; +} + +static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] = +{ + { 0, 4, 1 }, + { 1, 4, 1 }, + { 2, 5, 1 }, + { 3, 4, 2 }, + { 4, 1, 1 }, + { 5, 5, 2 }, + { 6, 6, 1 }, + { 7, 9, 2 }, + { 0xffffffff } +}; + +static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] = +{ + { 0, 4, 1 }, + { 0xffffffff } +}; + +static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] = +{ + { 0, 4, 1 }, + { 0xffffffff } +}; + +static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] = +{ + { 0, 4, 1 }, + { 0xffffffff } +}; + +static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] = +{ + { 0, 4, 1 }, + { 0xffffffff } +}; + +static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] = +{ + { 0, 4, 1 }, + { 1, 4, 1 }, + { 2, 5, 1 }, + { 3, 4, 1 }, + { 4, 1, 1 }, + { 5, 5, 1 }, + { 6, 6, 1 }, + { 7, 9, 1 }, + { 8, 4, 1 }, + { 9, 2, 1 }, + { 10, 3, 1 }, + { 11, 6, 1 }, + { 12, 8, 2 }, + { 13, 1, 1 }, + { 14, 2, 1 }, + { 15, 3, 1 }, + { 16, 1, 1 }, + { 17, 4, 1 }, + { 18, 3, 1 }, + { 19, 1, 1 }, + { 20, 8, 1 }, + { 21, 5, 1 }, + { 22, 1, 1 }, + { 23, 1, 1 }, + { 24, 4, 1 }, + { 27, 6, 1 }, + { 28, 1, 1 }, + { 0xffffffff } +}; + +static const struct kv_lcac_config_reg sx0_cac_config_reg[] = +{ + { 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } +}; + +static const struct kv_lcac_config_reg mc0_cac_config_reg[] = +{ + { 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } +}; + +static const struct kv_lcac_config_reg mc1_cac_config_reg[] = +{ + { 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } +}; + +static const struct kv_lcac_config_reg mc2_cac_config_reg[] = +{ + { 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } +}; + +static const struct 
kv_lcac_config_reg mc3_cac_config_reg[] = +{ + { 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } +}; + +static const struct kv_lcac_config_reg cpl_cac_config_reg[] = +{ + { 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } +}; + +static const struct kv_pt_config_reg didt_config_kv[] = +{ + { 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, + { 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, + { 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, + { 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, + { 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, + { 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, + { 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, + { 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, + { 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, + { 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, + { 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, + { 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, + { 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, + { 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, + { 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, + { 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x70, 0xff000000, 24, 0x0, 
KV_CONFIGREG_DIDT_IND }, + { 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, + { 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, + { 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, + { 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, + { 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, + { 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0xFFFFFFFF } +}; + +static struct kv_ps *kv_get_ps(struct amdgpu_ps *rps) +{ + struct kv_ps *ps = rps->ps_priv; + + return ps; +} + +static struct kv_power_info *kv_get_pi(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = adev->pm.dpm.priv; + + return pi; +} + +#if 0 +static void kv_program_local_cac_table(struct amdgpu_device *adev, + const struct kv_lcac_config_values *local_cac_table, + const struct kv_lcac_config_reg *local_cac_reg) +{ + u32 i, count, data; + const struct kv_lcac_config_values *values = local_cac_table; + + while (values->block_id != 0xffffffff) { + count = values->signal_id; + for (i = 0; i < count; i++) { + data = ((values->block_id << local_cac_reg->block_shift) & + local_cac_reg->block_mask); + data |= ((i << local_cac_reg->signal_shift) & + local_cac_reg->signal_mask); + data |= ((values->t << local_cac_reg->t_shift) & + local_cac_reg->t_mask); + data |= ((1 << local_cac_reg->enable_shift) & + local_cac_reg->enable_mask); + WREG32_SMC(local_cac_reg->cntl, data); + } + values++; + } +} +#endif + +static int kv_program_pt_config_registers(struct amdgpu_device *adev, + const struct kv_pt_config_reg *cac_config_regs) +{ + const struct kv_pt_config_reg *config_regs = cac_config_regs; + u32 data; + u32 cache = 0; + + if (config_regs == NULL) + return -EINVAL; + + while (config_regs->offset != 0xFFFFFFFF) { + if (config_regs->type == KV_CONFIGREG_CACHE) { + cache |= ((config_regs->value << config_regs->shift) & config_regs->mask); + } else { + switch (config_regs->type) { + case KV_CONFIGREG_SMC_IND: + data = RREG32_SMC(config_regs->offset); + break; + case KV_CONFIGREG_DIDT_IND: + data = RREG32_DIDT(config_regs->offset); + break; + default: + data = RREG32(config_regs->offset); + break; + } + + data &= ~config_regs->mask; + data |= ((config_regs->value << config_regs->shift) & config_regs->mask); + data |= cache; + cache = 0; + + switch (config_regs->type) { + case KV_CONFIGREG_SMC_IND: + WREG32_SMC(config_regs->offset, data); + break; + case KV_CONFIGREG_DIDT_IND: + WREG32_DIDT(config_regs->offset, data); + break; + default: + WREG32(config_regs->offset, data); + break; + } + } + config_regs++; + } + + return 0; +} + +static void kv_do_enable_didt(struct amdgpu_device *adev, bool enable) +{ + struct kv_power_info *pi = kv_get_pi(adev); + u32 data; + + if (pi->caps_sq_ramping) { + data = RREG32_DIDT(ixDIDT_SQ_CTRL0); + if (enable) + data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK; + else + data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK; + WREG32_DIDT(ixDIDT_SQ_CTRL0, data); + } + + if (pi->caps_db_ramping) { + data = RREG32_DIDT(ixDIDT_DB_CTRL0); + if (enable) + data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK; + else + data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK; + 
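+		/* write the updated DB control word back; the TD and TCP
+		 * blocks below repeat the same read-modify-write of the
+		 * DIDT_CTRL_EN bit, each guarded by its caps_*_ramping flag
+		 */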
WREG32_DIDT(ixDIDT_DB_CTRL0, data); + } + + if (pi->caps_td_ramping) { + data = RREG32_DIDT(ixDIDT_TD_CTRL0); + if (enable) + data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK; + else + data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK; + WREG32_DIDT(ixDIDT_TD_CTRL0, data); + } + + if (pi->caps_tcp_ramping) { + data = RREG32_DIDT(ixDIDT_TCP_CTRL0); + if (enable) + data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK; + else + data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK; + WREG32_DIDT(ixDIDT_TCP_CTRL0, data); + } +} + +static int kv_enable_didt(struct amdgpu_device *adev, bool enable) +{ + struct kv_power_info *pi = kv_get_pi(adev); + int ret; + + if (pi->caps_sq_ramping || + pi->caps_db_ramping || + pi->caps_td_ramping || + pi->caps_tcp_ramping) { + gfx_v7_0_enter_rlc_safe_mode(adev); + + if (enable) { + ret = kv_program_pt_config_registers(adev, didt_config_kv); + if (ret) { + gfx_v7_0_exit_rlc_safe_mode(adev); + return ret; + } + } + + kv_do_enable_didt(adev, enable); + + gfx_v7_0_exit_rlc_safe_mode(adev); + } + + return 0; +} + +#if 0 +static void kv_initialize_hardware_cac_manager(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + + if (pi->caps_cac) { + WREG32_SMC(ixLCAC_SX0_OVR_SEL, 0); + WREG32_SMC(ixLCAC_SX0_OVR_VAL, 0); + kv_program_local_cac_table(adev, sx_local_cac_cfg_kv, sx0_cac_config_reg); + + WREG32_SMC(ixLCAC_MC0_OVR_SEL, 0); + WREG32_SMC(ixLCAC_MC0_OVR_VAL, 0); + kv_program_local_cac_table(adev, mc0_local_cac_cfg_kv, mc0_cac_config_reg); + + WREG32_SMC(ixLCAC_MC1_OVR_SEL, 0); + WREG32_SMC(ixLCAC_MC1_OVR_VAL, 0); + kv_program_local_cac_table(adev, mc1_local_cac_cfg_kv, mc1_cac_config_reg); + + WREG32_SMC(ixLCAC_MC2_OVR_SEL, 0); + WREG32_SMC(ixLCAC_MC2_OVR_VAL, 0); + kv_program_local_cac_table(adev, mc2_local_cac_cfg_kv, mc2_cac_config_reg); + + WREG32_SMC(ixLCAC_MC3_OVR_SEL, 0); + WREG32_SMC(ixLCAC_MC3_OVR_VAL, 0); + kv_program_local_cac_table(adev, mc3_local_cac_cfg_kv, mc3_cac_config_reg); + + WREG32_SMC(ixLCAC_CPL_OVR_SEL, 0); + WREG32_SMC(ixLCAC_CPL_OVR_VAL, 0); + kv_program_local_cac_table(adev, cpl_local_cac_cfg_kv, cpl_cac_config_reg); + } +} +#endif + +static int kv_enable_smc_cac(struct amdgpu_device *adev, bool enable) +{ + struct kv_power_info *pi = kv_get_pi(adev); + int ret = 0; + + if (pi->caps_cac) { + if (enable) { + ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_EnableCac); + if (ret) + pi->cac_enabled = false; + else + pi->cac_enabled = true; + } else if (pi->cac_enabled) { + amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DisableCac); + pi->cac_enabled = false; + } + } + + return ret; +} + +static int kv_process_firmware_header(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + u32 tmp; + int ret; + + ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU7_Firmware_Header, DpmTable), + &tmp, pi->sram_end); + + if (ret == 0) + pi->dpm_table_start = tmp; + + ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU7_Firmware_Header, SoftRegisters), + &tmp, pi->sram_end); + + if (ret == 0) + pi->soft_regs_start = tmp; + + return ret; +} + +static int kv_enable_dpm_voltage_scaling(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + int ret; + + pi->graphics_voltage_change_enable = 1; + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable), + &pi->graphics_voltage_change_enable, + sizeof(u8), pi->sram_end); + + return ret; +} + +static int 
kv_set_dpm_interval(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + int ret; + + pi->graphics_interval = 1; + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, GraphicsInterval), + &pi->graphics_interval, + sizeof(u8), pi->sram_end); + + return ret; +} + +static int kv_set_dpm_boot_state(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + int ret; + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel), + &pi->graphics_boot_level, + sizeof(u8), pi->sram_end); + + return ret; +} + +static void kv_program_vc(struct amdgpu_device *adev) +{ + WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0x3FFFC100); +} + +static void kv_clear_vc(struct amdgpu_device *adev) +{ + WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0); +} + +static int kv_set_divider_value(struct amdgpu_device *adev, + u32 index, u32 sclk) +{ + struct kv_power_info *pi = kv_get_pi(adev); + struct atom_clock_dividers dividers; + int ret; + + ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, + sclk, false, ÷rs); + if (ret) + return ret; + + pi->graphics_level[index].SclkDid = (u8)dividers.post_div; + pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk); + + return 0; +} + +static u16 kv_convert_8bit_index_to_voltage(struct amdgpu_device *adev, + u16 voltage) +{ + return 6200 - (voltage * 25); +} + +static u16 kv_convert_2bit_index_to_voltage(struct amdgpu_device *adev, + u32 vid_2bit) +{ + struct kv_power_info *pi = kv_get_pi(adev); + u32 vid_8bit = kv_convert_vid2_to_vid7(adev, + &pi->sys_info.vid_mapping_table, + vid_2bit); + + return kv_convert_8bit_index_to_voltage(adev, (u16)vid_8bit); +} + + +static int kv_set_vid(struct amdgpu_device *adev, u32 index, u32 vid) +{ + struct kv_power_info *pi = kv_get_pi(adev); + + pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t; + pi->graphics_level[index].MinVddNb = + cpu_to_be32(kv_convert_2bit_index_to_voltage(adev, vid)); + + return 0; +} + +static int kv_set_at(struct amdgpu_device *adev, u32 index, u32 at) +{ + struct kv_power_info *pi = kv_get_pi(adev); + + pi->graphics_level[index].AT = cpu_to_be16((u16)at); + + return 0; +} + +static void kv_dpm_power_level_enable(struct amdgpu_device *adev, + u32 index, bool enable) +{ + struct kv_power_info *pi = kv_get_pi(adev); + + pi->graphics_level[index].EnabledForActivity = enable ? 
1 : 0; +} + +static void kv_start_dpm(struct amdgpu_device *adev) +{ + u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT); + + tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK; + WREG32_SMC(ixGENERAL_PWRMGT, tmp); + + amdgpu_kv_smc_dpm_enable(adev, true); +} + +static void kv_stop_dpm(struct amdgpu_device *adev) +{ + amdgpu_kv_smc_dpm_enable(adev, false); +} + +static void kv_start_am(struct amdgpu_device *adev) +{ + u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL); + + sclk_pwrmgt_cntl &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | + SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK); + sclk_pwrmgt_cntl |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK; + + WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl); +} + +static void kv_reset_am(struct amdgpu_device *adev) +{ + u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL); + + sclk_pwrmgt_cntl |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | + SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK); + + WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl); +} + +static int kv_freeze_sclk_dpm(struct amdgpu_device *adev, bool freeze) +{ + return amdgpu_kv_notify_message_to_smu(adev, freeze ? + PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel); +} + +static int kv_force_lowest_valid(struct amdgpu_device *adev) +{ + return kv_force_dpm_lowest(adev); +} + +static int kv_unforce_levels(struct amdgpu_device *adev) +{ + if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) + return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NoForcedLevel); + else + return kv_set_enabled_levels(adev); +} + +static int kv_update_sclk_t(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + u32 low_sclk_interrupt_t = 0; + int ret = 0; + + if (pi->caps_sclk_throttle_low_notification) { + low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t); + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT), + (u8 *)&low_sclk_interrupt_t, + sizeof(u32), pi->sram_end); + } + return ret; +} + +static int kv_program_bootup_state(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + u32 i; + struct amdgpu_clock_voltage_dependency_table *table = + &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; + + if (table && table->count) { + for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { + if (table->entries[i].clk == pi->boot_pl.sclk) + break; + } + + pi->graphics_boot_level = (u8)i; + kv_dpm_power_level_enable(adev, i, true); + } else { + struct sumo_sclk_voltage_mapping_table *table = + &pi->sys_info.sclk_voltage_mapping_table; + + if (table->num_max_dpm_entries == 0) + return -EINVAL; + + for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { + if (table->entries[i].sclk_frequency == pi->boot_pl.sclk) + break; + } + + pi->graphics_boot_level = (u8)i; + kv_dpm_power_level_enable(adev, i, true); + } + return 0; +} + +static int kv_enable_auto_thermal_throttling(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + int ret; + + pi->graphics_therm_throttle_enable = 1; + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable), + &pi->graphics_therm_throttle_enable, + sizeof(u8), pi->sram_end); + + return ret; +} + +static int kv_upload_dpm_settings(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + int ret; + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, GraphicsLevel), + (u8 *)&pi->graphics_level, + 
sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS, + pi->sram_end); + + if (ret) + return ret; + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount), + &pi->graphics_dpm_level_count, + sizeof(u8), pi->sram_end); + + return ret; +} + +static u32 kv_get_clock_difference(u32 a, u32 b) +{ + return (a >= b) ? a - b : b - a; +} + +static u32 kv_get_clk_bypass(struct amdgpu_device *adev, u32 clk) +{ + struct kv_power_info *pi = kv_get_pi(adev); + u32 value; + + if (pi->caps_enable_dfs_bypass) { + if (kv_get_clock_difference(clk, 40000) < 200) + value = 3; + else if (kv_get_clock_difference(clk, 30000) < 200) + value = 2; + else if (kv_get_clock_difference(clk, 20000) < 200) + value = 7; + else if (kv_get_clock_difference(clk, 15000) < 200) + value = 6; + else if (kv_get_clock_difference(clk, 10000) < 200) + value = 8; + else + value = 0; + } else { + value = 0; + } + + return value; +} + +static int kv_populate_uvd_table(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + struct amdgpu_uvd_clock_voltage_dependency_table *table = + &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; + struct atom_clock_dividers dividers; + int ret; + u32 i; + + if (table == NULL || table->count == 0) + return 0; + + pi->uvd_level_count = 0; + for (i = 0; i < table->count; i++) { + if (pi->high_voltage_t && + (pi->high_voltage_t < table->entries[i].v)) + break; + + pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk); + pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk); + pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v); + + pi->uvd_level[i].VClkBypassCntl = + (u8)kv_get_clk_bypass(adev, table->entries[i].vclk); + pi->uvd_level[i].DClkBypassCntl = + (u8)kv_get_clk_bypass(adev, table->entries[i].dclk); + + ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, + table->entries[i].vclk, false, ÷rs); + if (ret) + return ret; + pi->uvd_level[i].VclkDivider = (u8)dividers.post_div; + + ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, + table->entries[i].dclk, false, ÷rs); + if (ret) + return ret; + pi->uvd_level[i].DclkDivider = (u8)dividers.post_div; + + pi->uvd_level_count++; + } + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, UvdLevelCount), + (u8 *)&pi->uvd_level_count, + sizeof(u8), pi->sram_end); + if (ret) + return ret; + + pi->uvd_interval = 1; + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, UVDInterval), + &pi->uvd_interval, + sizeof(u8), pi->sram_end); + if (ret) + return ret; + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, UvdLevel), + (u8 *)&pi->uvd_level, + sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD, + pi->sram_end); + + return ret; + +} + +static int kv_populate_vce_table(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + int ret; + u32 i; + struct amdgpu_vce_clock_voltage_dependency_table *table = + &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; + struct atom_clock_dividers dividers; + + if (table == NULL || table->count == 0) + return 0; + + pi->vce_level_count = 0; + for (i = 0; i < table->count; i++) { + if (pi->high_voltage_t && + pi->high_voltage_t < table->entries[i].v) + break; + + pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk); + pi->vce_level[i].MinVoltage = 
cpu_to_be16(table->entries[i].v); + + pi->vce_level[i].ClkBypassCntl = + (u8)kv_get_clk_bypass(adev, table->entries[i].evclk); + + ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, + table->entries[i].evclk, false, ÷rs); + if (ret) + return ret; + pi->vce_level[i].Divider = (u8)dividers.post_div; + + pi->vce_level_count++; + } + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, VceLevelCount), + (u8 *)&pi->vce_level_count, + sizeof(u8), + pi->sram_end); + if (ret) + return ret; + + pi->vce_interval = 1; + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, VCEInterval), + (u8 *)&pi->vce_interval, + sizeof(u8), + pi->sram_end); + if (ret) + return ret; + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, VceLevel), + (u8 *)&pi->vce_level, + sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE, + pi->sram_end); + + return ret; +} + +static int kv_populate_samu_table(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + struct amdgpu_clock_voltage_dependency_table *table = + &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; + struct atom_clock_dividers dividers; + int ret; + u32 i; + + if (table == NULL || table->count == 0) + return 0; + + pi->samu_level_count = 0; + for (i = 0; i < table->count; i++) { + if (pi->high_voltage_t && + pi->high_voltage_t < table->entries[i].v) + break; + + pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk); + pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); + + pi->samu_level[i].ClkBypassCntl = + (u8)kv_get_clk_bypass(adev, table->entries[i].clk); + + ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, + table->entries[i].clk, false, ÷rs); + if (ret) + return ret; + pi->samu_level[i].Divider = (u8)dividers.post_div; + + pi->samu_level_count++; + } + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, SamuLevelCount), + (u8 *)&pi->samu_level_count, + sizeof(u8), + pi->sram_end); + if (ret) + return ret; + + pi->samu_interval = 1; + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, SAMUInterval), + (u8 *)&pi->samu_interval, + sizeof(u8), + pi->sram_end); + if (ret) + return ret; + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, SamuLevel), + (u8 *)&pi->samu_level, + sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU, + pi->sram_end); + if (ret) + return ret; + + return ret; +} + + +static int kv_populate_acp_table(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + struct amdgpu_clock_voltage_dependency_table *table = + &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; + struct atom_clock_dividers dividers; + int ret; + u32 i; + + if (table == NULL || table->count == 0) + return 0; + + pi->acp_level_count = 0; + for (i = 0; i < table->count; i++) { + pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk); + pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); + + ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, + table->entries[i].clk, false, ÷rs); + if (ret) + return ret; + pi->acp_level[i].Divider = (u8)dividers.post_div; + + pi->acp_level_count++; + } + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, AcpLevelCount), + (u8 
*)&pi->acp_level_count, + sizeof(u8), + pi->sram_end); + if (ret) + return ret; + + pi->acp_interval = 1; + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, ACPInterval), + (u8 *)&pi->acp_interval, + sizeof(u8), + pi->sram_end); + if (ret) + return ret; + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, AcpLevel), + (u8 *)&pi->acp_level, + sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP, + pi->sram_end); + if (ret) + return ret; + + return ret; +} + +static void kv_calculate_dfs_bypass_settings(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + u32 i; + struct amdgpu_clock_voltage_dependency_table *table = + &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; + + if (table && table->count) { + for (i = 0; i < pi->graphics_dpm_level_count; i++) { + if (pi->caps_enable_dfs_bypass) { + if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200) + pi->graphics_level[i].ClkBypassCntl = 3; + else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200) + pi->graphics_level[i].ClkBypassCntl = 2; + else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200) + pi->graphics_level[i].ClkBypassCntl = 7; + else if (kv_get_clock_difference(table->entries[i].clk , 20000) < 200) + pi->graphics_level[i].ClkBypassCntl = 6; + else if (kv_get_clock_difference(table->entries[i].clk , 10000) < 200) + pi->graphics_level[i].ClkBypassCntl = 8; + else + pi->graphics_level[i].ClkBypassCntl = 0; + } else { + pi->graphics_level[i].ClkBypassCntl = 0; + } + } + } else { + struct sumo_sclk_voltage_mapping_table *table = + &pi->sys_info.sclk_voltage_mapping_table; + for (i = 0; i < pi->graphics_dpm_level_count; i++) { + if (pi->caps_enable_dfs_bypass) { + if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200) + pi->graphics_level[i].ClkBypassCntl = 3; + else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200) + pi->graphics_level[i].ClkBypassCntl = 2; + else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200) + pi->graphics_level[i].ClkBypassCntl = 7; + else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200) + pi->graphics_level[i].ClkBypassCntl = 6; + else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200) + pi->graphics_level[i].ClkBypassCntl = 8; + else + pi->graphics_level[i].ClkBypassCntl = 0; + } else { + pi->graphics_level[i].ClkBypassCntl = 0; + } + } + } +} + +static int kv_enable_ulv(struct amdgpu_device *adev, bool enable) +{ + return amdgpu_kv_notify_message_to_smu(adev, enable ? 
+ PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV); +} + +static void kv_reset_acp_boot_level(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + + pi->acp_boot_level = 0xff; +} + +static void kv_update_current_ps(struct amdgpu_device *adev, + struct amdgpu_ps *rps) +{ + struct kv_ps *new_ps = kv_get_ps(rps); + struct kv_power_info *pi = kv_get_pi(adev); + + pi->current_rps = *rps; + pi->current_ps = *new_ps; + pi->current_rps.ps_priv = &pi->current_ps; +} + +static void kv_update_requested_ps(struct amdgpu_device *adev, + struct amdgpu_ps *rps) +{ + struct kv_ps *new_ps = kv_get_ps(rps); + struct kv_power_info *pi = kv_get_pi(adev); + + pi->requested_rps = *rps; + pi->requested_ps = *new_ps; + pi->requested_rps.ps_priv = &pi->requested_ps; +} + +static void kv_dpm_enable_bapm(struct amdgpu_device *adev, bool enable) +{ + struct kv_power_info *pi = kv_get_pi(adev); + int ret; + + if (pi->bapm_enable) { + ret = amdgpu_kv_smc_bapm_enable(adev, enable); + if (ret) + DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); + } +} + +static int kv_dpm_enable(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + int ret; + + ret = kv_process_firmware_header(adev); + if (ret) { + DRM_ERROR("kv_process_firmware_header failed\n"); + return ret; + } + kv_init_fps_limits(adev); + kv_init_graphics_levels(adev); + ret = kv_program_bootup_state(adev); + if (ret) { + DRM_ERROR("kv_program_bootup_state failed\n"); + return ret; + } + kv_calculate_dfs_bypass_settings(adev); + ret = kv_upload_dpm_settings(adev); + if (ret) { + DRM_ERROR("kv_upload_dpm_settings failed\n"); + return ret; + } + ret = kv_populate_uvd_table(adev); + if (ret) { + DRM_ERROR("kv_populate_uvd_table failed\n"); + return ret; + } + ret = kv_populate_vce_table(adev); + if (ret) { + DRM_ERROR("kv_populate_vce_table failed\n"); + return ret; + } + ret = kv_populate_samu_table(adev); + if (ret) { + DRM_ERROR("kv_populate_samu_table failed\n"); + return ret; + } + ret = kv_populate_acp_table(adev); + if (ret) { + DRM_ERROR("kv_populate_acp_table failed\n"); + return ret; + } + kv_program_vc(adev); +#if 0 + kv_initialize_hardware_cac_manager(adev); +#endif + kv_start_am(adev); + if (pi->enable_auto_thermal_throttling) { + ret = kv_enable_auto_thermal_throttling(adev); + if (ret) { + DRM_ERROR("kv_enable_auto_thermal_throttling failed\n"); + return ret; + } + } + ret = kv_enable_dpm_voltage_scaling(adev); + if (ret) { + DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n"); + return ret; + } + ret = kv_set_dpm_interval(adev); + if (ret) { + DRM_ERROR("kv_set_dpm_interval failed\n"); + return ret; + } + ret = kv_set_dpm_boot_state(adev); + if (ret) { + DRM_ERROR("kv_set_dpm_boot_state failed\n"); + return ret; + } + ret = kv_enable_ulv(adev, true); + if (ret) { + DRM_ERROR("kv_enable_ulv failed\n"); + return ret; + } + kv_start_dpm(adev); + ret = kv_enable_didt(adev, true); + if (ret) { + DRM_ERROR("kv_enable_didt failed\n"); + return ret; + } + ret = kv_enable_smc_cac(adev, true); + if (ret) { + DRM_ERROR("kv_enable_smc_cac failed\n"); + return ret; + } + + kv_reset_acp_boot_level(adev); + + ret = amdgpu_kv_smc_bapm_enable(adev, false); + if (ret) { + DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); + return ret; + } + + kv_update_current_ps(adev, adev->pm.dpm.boot_ps); + + if (adev->irq.installed && + amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) { + ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX); + if (ret) { + 
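+			/* abort the enable sequence before arming the thermal
+			 * interrupts if the temperature range could not be
+			 * programmed */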
DRM_ERROR("kv_set_thermal_temperature_range failed\n"); + return ret; + } + amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq, + AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); + amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq, + AMDGPU_THERMAL_IRQ_HIGH_TO_LOW); + } + + return ret; +} + +static void kv_dpm_disable(struct amdgpu_device *adev) +{ + amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, + AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); + amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, + AMDGPU_THERMAL_IRQ_HIGH_TO_LOW); + + amdgpu_kv_smc_bapm_enable(adev, false); + + if (adev->asic_type == CHIP_MULLINS) + kv_enable_nb_dpm(adev, false); + + /* powerup blocks */ + kv_dpm_powergate_acp(adev, false); + kv_dpm_powergate_samu(adev, false); + kv_dpm_powergate_vce(adev, false); + kv_dpm_powergate_uvd(adev, false); + + kv_enable_smc_cac(adev, false); + kv_enable_didt(adev, false); + kv_clear_vc(adev); + kv_stop_dpm(adev); + kv_enable_ulv(adev, false); + kv_reset_am(adev); + + kv_update_current_ps(adev, adev->pm.dpm.boot_ps); +} + +#if 0 +static int kv_write_smc_soft_register(struct amdgpu_device *adev, + u16 reg_offset, u32 value) +{ + struct kv_power_info *pi = kv_get_pi(adev); + + return amdgpu_kv_copy_bytes_to_smc(adev, pi->soft_regs_start + reg_offset, + (u8 *)&value, sizeof(u16), pi->sram_end); +} + +static int kv_read_smc_soft_register(struct amdgpu_device *adev, + u16 reg_offset, u32 *value) +{ + struct kv_power_info *pi = kv_get_pi(adev); + + return amdgpu_kv_read_smc_sram_dword(adev, pi->soft_regs_start + reg_offset, + value, pi->sram_end); +} +#endif + +static void kv_init_sclk_t(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + + pi->low_sclk_interrupt_t = 0; +} + +static int kv_init_fps_limits(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + int ret = 0; + + if (pi->caps_fps) { + u16 tmp; + + tmp = 45; + pi->fps_high_t = cpu_to_be16(tmp); + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, FpsHighT), + (u8 *)&pi->fps_high_t, + sizeof(u16), pi->sram_end); + + tmp = 30; + pi->fps_low_t = cpu_to_be16(tmp); + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, FpsLowT), + (u8 *)&pi->fps_low_t, + sizeof(u16), pi->sram_end); + + } + return ret; +} + +static void kv_init_powergate_state(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + + pi->uvd_power_gated = false; + pi->vce_power_gated = false; + pi->samu_power_gated = false; + pi->acp_power_gated = false; + +} + +static int kv_enable_uvd_dpm(struct amdgpu_device *adev, bool enable) +{ + return amdgpu_kv_notify_message_to_smu(adev, enable ? + PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable); +} + +static int kv_enable_vce_dpm(struct amdgpu_device *adev, bool enable) +{ + return amdgpu_kv_notify_message_to_smu(adev, enable ? + PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable); +} + +static int kv_enable_samu_dpm(struct amdgpu_device *adev, bool enable) +{ + return amdgpu_kv_notify_message_to_smu(adev, enable ? + PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable); +} + +static int kv_enable_acp_dpm(struct amdgpu_device *adev, bool enable) +{ + return amdgpu_kv_notify_message_to_smu(adev, enable ? 
+ PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable); +} + +static int kv_update_uvd_dpm(struct amdgpu_device *adev, bool gate) +{ + struct kv_power_info *pi = kv_get_pi(adev); + struct amdgpu_uvd_clock_voltage_dependency_table *table = + &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; + int ret; + u32 mask; + + if (!gate) { + if (table->count) + pi->uvd_boot_level = table->count - 1; + else + pi->uvd_boot_level = 0; + + if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) { + mask = 1 << pi->uvd_boot_level; + } else { + mask = 0x1f; + } + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, UvdBootLevel), + (uint8_t *)&pi->uvd_boot_level, + sizeof(u8), pi->sram_end); + if (ret) + return ret; + + amdgpu_kv_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_UVDDPM_SetEnabledMask, + mask); + } + + return kv_enable_uvd_dpm(adev, !gate); +} + +static u8 kv_get_vce_boot_level(struct amdgpu_device *adev, u32 evclk) +{ + u8 i; + struct amdgpu_vce_clock_voltage_dependency_table *table = + &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; + + for (i = 0; i < table->count; i++) { + if (table->entries[i].evclk >= evclk) + break; + } + + return i; +} + +static int kv_update_vce_dpm(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_new_state, + struct amdgpu_ps *amdgpu_current_state) +{ + struct kv_power_info *pi = kv_get_pi(adev); + struct amdgpu_vce_clock_voltage_dependency_table *table = + &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; + int ret; + + if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) { + kv_dpm_powergate_vce(adev, false); + /* turn the clocks on when encoding */ + ret = amdgpu_set_clockgating_state(adev, AMDGPU_IP_BLOCK_TYPE_VCE, + AMDGPU_CG_STATE_UNGATE); + if (ret) + return ret; + if (pi->caps_stable_p_state) + pi->vce_boot_level = table->count - 1; + else + pi->vce_boot_level = kv_get_vce_boot_level(adev, amdgpu_new_state->evclk); + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, VceBootLevel), + (u8 *)&pi->vce_boot_level, + sizeof(u8), + pi->sram_end); + if (ret) + return ret; + + if (pi->caps_stable_p_state) + amdgpu_kv_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_VCEDPM_SetEnabledMask, + (1 << pi->vce_boot_level)); + + kv_enable_vce_dpm(adev, true); + } else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) { + kv_enable_vce_dpm(adev, false); + /* turn the clocks off when not encoding */ + ret = amdgpu_set_clockgating_state(adev, AMDGPU_IP_BLOCK_TYPE_VCE, + AMDGPU_CG_STATE_GATE); + if (ret) + return ret; + kv_dpm_powergate_vce(adev, true); + } + + return 0; +} + +static int kv_update_samu_dpm(struct amdgpu_device *adev, bool gate) +{ + struct kv_power_info *pi = kv_get_pi(adev); + struct amdgpu_clock_voltage_dependency_table *table = + &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; + int ret; + + if (!gate) { + if (pi->caps_stable_p_state) + pi->samu_boot_level = table->count - 1; + else + pi->samu_boot_level = 0; + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, SamuBootLevel), + (u8 *)&pi->samu_boot_level, + sizeof(u8), + pi->sram_end); + if (ret) + return ret; + + if (pi->caps_stable_p_state) + amdgpu_kv_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_SAMUDPM_SetEnabledMask, + (1 << pi->samu_boot_level)); + } + + return kv_enable_samu_dpm(adev, !gate); +} + +static u8 kv_get_acp_boot_level(struct amdgpu_device *adev) +{ + u8 i; + struct 
amdgpu_clock_voltage_dependency_table *table = + &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; + + for (i = 0; i < table->count; i++) { + if (table->entries[i].clk >= 0) /* XXX */ + break; + } + + if (i >= table->count) + i = table->count - 1; + + return i; +} + +static void kv_update_acp_boot_level(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + u8 acp_boot_level; + + if (!pi->caps_stable_p_state) { + acp_boot_level = kv_get_acp_boot_level(adev); + if (acp_boot_level != pi->acp_boot_level) { + pi->acp_boot_level = acp_boot_level; + amdgpu_kv_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_ACPDPM_SetEnabledMask, + (1 << pi->acp_boot_level)); + } + } +} + +static int kv_update_acp_dpm(struct amdgpu_device *adev, bool gate) +{ + struct kv_power_info *pi = kv_get_pi(adev); + struct amdgpu_clock_voltage_dependency_table *table = + &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; + int ret; + + if (!gate) { + if (pi->caps_stable_p_state) + pi->acp_boot_level = table->count - 1; + else + pi->acp_boot_level = kv_get_acp_boot_level(adev); + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, AcpBootLevel), + (u8 *)&pi->acp_boot_level, + sizeof(u8), + pi->sram_end); + if (ret) + return ret; + + if (pi->caps_stable_p_state) + amdgpu_kv_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_ACPDPM_SetEnabledMask, + (1 << pi->acp_boot_level)); + } + + return kv_enable_acp_dpm(adev, !gate); +} + +static void kv_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate) +{ + struct kv_power_info *pi = kv_get_pi(adev); + int ret; + + if (pi->uvd_power_gated == gate) + return; + + pi->uvd_power_gated = gate; + + if (gate) { + if (pi->caps_uvd_pg) { + /* disable clockgating so we can properly shut down the block */ + ret = amdgpu_set_clockgating_state(adev, AMDGPU_IP_BLOCK_TYPE_UVD, + AMDGPU_CG_STATE_UNGATE); + /* shutdown the UVD block */ + ret = amdgpu_set_powergating_state(adev, AMDGPU_IP_BLOCK_TYPE_UVD, + AMDGPU_PG_STATE_GATE); + /* XXX: check for errors */ + } + kv_update_uvd_dpm(adev, gate); + if (pi->caps_uvd_pg) + /* power off the UVD block */ + amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerOFF); + } else { + if (pi->caps_uvd_pg) { + /* power on the UVD block */ + amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON); + /* re-init the UVD block */ + ret = amdgpu_set_powergating_state(adev, AMDGPU_IP_BLOCK_TYPE_UVD, + AMDGPU_PG_STATE_UNGATE); + /* enable clockgating. 
hw will dynamically gate/ungate clocks on the fly */ + ret = amdgpu_set_clockgating_state(adev, AMDGPU_IP_BLOCK_TYPE_UVD, + AMDGPU_CG_STATE_GATE); + /* XXX: check for errors */ + } + kv_update_uvd_dpm(adev, gate); + } +} + +static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate) +{ + struct kv_power_info *pi = kv_get_pi(adev); + int ret; + + if (pi->vce_power_gated == gate) + return; + + pi->vce_power_gated = gate; + + if (gate) { + if (pi->caps_vce_pg) { + /* shutdown the VCE block */ + ret = amdgpu_set_powergating_state(adev, AMDGPU_IP_BLOCK_TYPE_VCE, + AMDGPU_PG_STATE_GATE); + /* XXX: check for errors */ + /* power off the VCE block */ + amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF); + } + } else { + if (pi->caps_vce_pg) { + /* power on the VCE block */ + amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); + /* re-init the VCE block */ + ret = amdgpu_set_powergating_state(adev, AMDGPU_IP_BLOCK_TYPE_VCE, + AMDGPU_PG_STATE_UNGATE); + /* XXX: check for errors */ + } + } +} + +static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate) +{ + struct kv_power_info *pi = kv_get_pi(adev); + + if (pi->samu_power_gated == gate) + return; + + pi->samu_power_gated = gate; + + if (gate) { + kv_update_samu_dpm(adev, true); + if (pi->caps_samu_pg) + amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerOFF); + } else { + if (pi->caps_samu_pg) + amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerON); + kv_update_samu_dpm(adev, false); + } +} + +static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate) +{ + struct kv_power_info *pi = kv_get_pi(adev); + + if (pi->acp_power_gated == gate) + return; + + if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) + return; + + pi->acp_power_gated = gate; + + if (gate) { + kv_update_acp_dpm(adev, true); + if (pi->caps_acp_pg) + amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerOFF); + } else { + if (pi->caps_acp_pg) + amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerON); + kv_update_acp_dpm(adev, false); + } +} + +static void kv_set_valid_clock_range(struct amdgpu_device *adev, + struct amdgpu_ps *new_rps) +{ + struct kv_ps *new_ps = kv_get_ps(new_rps); + struct kv_power_info *pi = kv_get_pi(adev); + u32 i; + struct amdgpu_clock_voltage_dependency_table *table = + &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; + + if (table && table->count) { + for (i = 0; i < pi->graphics_dpm_level_count; i++) { + if ((table->entries[i].clk >= new_ps->levels[0].sclk) || + (i == (pi->graphics_dpm_level_count - 1))) { + pi->lowest_valid = i; + break; + } + } + + for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { + if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk) + break; + } + pi->highest_valid = i; + + if (pi->lowest_valid > pi->highest_valid) { + if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) > + (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk)) + pi->highest_valid = pi->lowest_valid; + else + pi->lowest_valid = pi->highest_valid; + } + } else { + struct sumo_sclk_voltage_mapping_table *table = + &pi->sys_info.sclk_voltage_mapping_table; + + for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) { + if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk || + i == (int)(pi->graphics_dpm_level_count - 1)) { + pi->lowest_valid = i; + break; + } + } + + for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { + if (table->entries[i].sclk_frequency <= + 
new_ps->levels[new_ps->num_levels - 1].sclk) + break; + } + pi->highest_valid = i; + + if (pi->lowest_valid > pi->highest_valid) { + if ((new_ps->levels[0].sclk - + table->entries[pi->highest_valid].sclk_frequency) > + (table->entries[pi->lowest_valid].sclk_frequency - + new_ps->levels[new_ps->num_levels -1].sclk)) + pi->highest_valid = pi->lowest_valid; + else + pi->lowest_valid = pi->highest_valid; + } + } +} + +static int kv_update_dfs_bypass_settings(struct amdgpu_device *adev, + struct amdgpu_ps *new_rps) +{ + struct kv_ps *new_ps = kv_get_ps(new_rps); + struct kv_power_info *pi = kv_get_pi(adev); + int ret = 0; + u8 clk_bypass_cntl; + + if (pi->caps_enable_dfs_bypass) { + clk_bypass_cntl = new_ps->need_dfs_bypass ? + pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0; + ret = amdgpu_kv_copy_bytes_to_smc(adev, + (pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) + + (pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) + + offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)), + &clk_bypass_cntl, + sizeof(u8), pi->sram_end); + } + + return ret; +} + +static int kv_enable_nb_dpm(struct amdgpu_device *adev, + bool enable) +{ + struct kv_power_info *pi = kv_get_pi(adev); + int ret = 0; + + if (enable) { + if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) { + ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Enable); + if (ret == 0) + pi->nb_dpm_enabled = true; + } + } else { + if (pi->enable_nb_dpm && pi->nb_dpm_enabled) { + ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Disable); + if (ret == 0) + pi->nb_dpm_enabled = false; + } + } + + return ret; +} + +static int kv_dpm_force_performance_level(struct amdgpu_device *adev, + enum amdgpu_dpm_forced_level level) +{ + int ret; + + if (level == AMDGPU_DPM_FORCED_LEVEL_HIGH) { + ret = kv_force_dpm_highest(adev); + if (ret) + return ret; + } else if (level == AMDGPU_DPM_FORCED_LEVEL_LOW) { + ret = kv_force_dpm_lowest(adev); + if (ret) + return ret; + } else if (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) { + ret = kv_unforce_levels(adev); + if (ret) + return ret; + } + + adev->pm.dpm.forced_level = level; + + return 0; +} + +static int kv_dpm_pre_set_power_state(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps; + struct amdgpu_ps *new_ps = &requested_ps; + + kv_update_requested_ps(adev, new_ps); + + kv_apply_state_adjust_rules(adev, + &pi->requested_rps, + &pi->current_rps); + + return 0; +} + +static int kv_dpm_set_power_state(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + struct amdgpu_ps *new_ps = &pi->requested_rps; + struct amdgpu_ps *old_ps = &pi->current_rps; + int ret; + + if (pi->bapm_enable) { + ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.dpm.ac_power); + if (ret) { + DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); + return ret; + } + } + + if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { + if (pi->enable_dpm) { + kv_set_valid_clock_range(adev, new_ps); + kv_update_dfs_bypass_settings(adev, new_ps); + ret = kv_calculate_ds_divider(adev); + if (ret) { + DRM_ERROR("kv_calculate_ds_divider failed\n"); + return ret; + } + kv_calculate_nbps_level_settings(adev); + kv_calculate_dpm_settings(adev); + kv_force_lowest_valid(adev); + kv_enable_new_levels(adev); + kv_upload_dpm_settings(adev); + kv_program_nbps_index_settings(adev, new_ps); + kv_unforce_levels(adev); + kv_set_enabled_levels(adev); + kv_force_lowest_valid(adev); + 
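+			/* drop to the lowest valid level, then release the
+			 * forced level (PPSMC_MSG_NoForcedLevel on KB/ML) so
+			 * the SMU can pick from the newly enabled levels */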
kv_unforce_levels(adev); + + ret = kv_update_vce_dpm(adev, new_ps, old_ps); + if (ret) { + DRM_ERROR("kv_update_vce_dpm failed\n"); + return ret; + } + kv_update_sclk_t(adev); + if (adev->asic_type == CHIP_MULLINS) + kv_enable_nb_dpm(adev, true); + } + } else { + if (pi->enable_dpm) { + kv_set_valid_clock_range(adev, new_ps); + kv_update_dfs_bypass_settings(adev, new_ps); + ret = kv_calculate_ds_divider(adev); + if (ret) { + DRM_ERROR("kv_calculate_ds_divider failed\n"); + return ret; + } + kv_calculate_nbps_level_settings(adev); + kv_calculate_dpm_settings(adev); + kv_freeze_sclk_dpm(adev, true); + kv_upload_dpm_settings(adev); + kv_program_nbps_index_settings(adev, new_ps); + kv_freeze_sclk_dpm(adev, false); + kv_set_enabled_levels(adev); + ret = kv_update_vce_dpm(adev, new_ps, old_ps); + if (ret) { + DRM_ERROR("kv_update_vce_dpm failed\n"); + return ret; + } + kv_update_acp_boot_level(adev); + kv_update_sclk_t(adev); + kv_enable_nb_dpm(adev, true); + } + } + + return 0; +} + +static void kv_dpm_post_set_power_state(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + struct amdgpu_ps *new_ps = &pi->requested_rps; + + kv_update_current_ps(adev, new_ps); +} + +static void kv_dpm_setup_asic(struct amdgpu_device *adev) +{ + sumo_take_smu_control(adev, true); + kv_init_powergate_state(adev); + kv_init_sclk_t(adev); +} + +#if 0 +static void kv_dpm_reset_asic(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + + if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { + kv_force_lowest_valid(adev); + kv_init_graphics_levels(adev); + kv_program_bootup_state(adev); + kv_upload_dpm_settings(adev); + kv_force_lowest_valid(adev); + kv_unforce_levels(adev); + } else { + kv_init_graphics_levels(adev); + kv_program_bootup_state(adev); + kv_freeze_sclk_dpm(adev, true); + kv_upload_dpm_settings(adev); + kv_freeze_sclk_dpm(adev, false); + kv_set_enabled_level(adev, pi->graphics_boot_level); + } +} +#endif + +static void kv_construct_max_power_limits_table(struct amdgpu_device *adev, + struct amdgpu_clock_and_voltage_limits *table) +{ + struct kv_power_info *pi = kv_get_pi(adev); + + if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) { + int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1; + table->sclk = + pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency; + table->vddc = + kv_convert_2bit_index_to_voltage(adev, + pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit); + } + + table->mclk = pi->sys_info.nbp_memory_clock[0]; +} + +static void kv_patch_voltage_values(struct amdgpu_device *adev) +{ + int i; + struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table = + &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; + struct amdgpu_vce_clock_voltage_dependency_table *vce_table = + &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; + struct amdgpu_clock_voltage_dependency_table *samu_table = + &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; + struct amdgpu_clock_voltage_dependency_table *acp_table = + &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; + + if (uvd_table->count) { + for (i = 0; i < uvd_table->count; i++) + uvd_table->entries[i].v = + kv_convert_8bit_index_to_voltage(adev, + uvd_table->entries[i].v); + } + + if (vce_table->count) { + for (i = 0; i < vce_table->count; i++) + vce_table->entries[i].v = + kv_convert_8bit_index_to_voltage(adev, + vce_table->entries[i].v); + } + + if (samu_table->count) { + for (i = 0; i < 
samu_table->count; i++) + samu_table->entries[i].v = + kv_convert_8bit_index_to_voltage(adev, + samu_table->entries[i].v); + } + + if (acp_table->count) { + for (i = 0; i < acp_table->count; i++) + acp_table->entries[i].v = + kv_convert_8bit_index_to_voltage(adev, + acp_table->entries[i].v); + } + +} + +static void kv_construct_boot_state(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + + pi->boot_pl.sclk = pi->sys_info.bootup_sclk; + pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index; + pi->boot_pl.ds_divider_index = 0; + pi->boot_pl.ss_divider_index = 0; + pi->boot_pl.allow_gnb_slow = 1; + pi->boot_pl.force_nbp_state = 0; + pi->boot_pl.display_wm = 0; + pi->boot_pl.vce_wm = 0; +} + +static int kv_force_dpm_highest(struct amdgpu_device *adev) +{ + int ret; + u32 enable_mask, i; + + ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask); + if (ret) + return ret; + + for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) { + if (enable_mask & (1 << i)) + break; + } + + if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) + return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i); + else + return kv_set_enabled_level(adev, i); +} + +static int kv_force_dpm_lowest(struct amdgpu_device *adev) +{ + int ret; + u32 enable_mask, i; + + ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask); + if (ret) + return ret; + + for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) { + if (enable_mask & (1 << i)) + break; + } + + if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) + return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i); + else + return kv_set_enabled_level(adev, i); +} + +static u8 kv_get_sleep_divider_id_from_clock(struct amdgpu_device *adev, + u32 sclk, u32 min_sclk_in_sr) +{ + struct kv_power_info *pi = kv_get_pi(adev); + u32 i; + u32 temp; + u32 min = (min_sclk_in_sr > KV_MINIMUM_ENGINE_CLOCK) ? + min_sclk_in_sr : KV_MINIMUM_ENGINE_CLOCK; + + if (sclk < min) + return 0; + + if (!pi->caps_sclk_ds) + return 0; + + for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) { + temp = sclk / sumo_get_sleep_divider_from_id(i); + if (temp >= min) + break; + } + + return (u8)i; +} + +static int kv_get_high_voltage_limit(struct amdgpu_device *adev, int *limit) +{ + struct kv_power_info *pi = kv_get_pi(adev); + struct amdgpu_clock_voltage_dependency_table *table = + &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; + int i; + + if (table && table->count) { + for (i = table->count - 1; i >= 0; i--) { + if (pi->high_voltage_t && + (kv_convert_8bit_index_to_voltage(adev, table->entries[i].v) <= + pi->high_voltage_t)) { + *limit = i; + return 0; + } + } + } else { + struct sumo_sclk_voltage_mapping_table *table = + &pi->sys_info.sclk_voltage_mapping_table; + + for (i = table->num_max_dpm_entries - 1; i >= 0; i--) { + if (pi->high_voltage_t && + (kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit) <= + pi->high_voltage_t)) { + *limit = i; + return 0; + } + } + } + + *limit = 0; + return 0; +} + +static void kv_apply_state_adjust_rules(struct amdgpu_device *adev, + struct amdgpu_ps *new_rps, + struct amdgpu_ps *old_rps) +{ + struct kv_ps *ps = kv_get_ps(new_rps); + struct kv_power_info *pi = kv_get_pi(adev); + u32 min_sclk = 10000; /* ??? 
*/ + u32 sclk, mclk = 0; + int i, limit; + bool force_high; + struct amdgpu_clock_voltage_dependency_table *table = + &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; + u32 stable_p_state_sclk = 0; + struct amdgpu_clock_and_voltage_limits *max_limits = + &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; + + if (new_rps->vce_active) { + new_rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk; + new_rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk; + } else { + new_rps->evclk = 0; + new_rps->ecclk = 0; + } + + mclk = max_limits->mclk; + sclk = min_sclk; + + if (pi->caps_stable_p_state) { + stable_p_state_sclk = (max_limits->sclk * 75) / 100; + + for (i = table->count - 1; i >= 0; i--) { + if (stable_p_state_sclk >= table->entries[i].clk) { + stable_p_state_sclk = table->entries[i].clk; + break; + } + } + + if (i > 0) + stable_p_state_sclk = table->entries[0].clk; + + sclk = stable_p_state_sclk; + } + + if (new_rps->vce_active) { + if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk) + sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk; + } + + ps->need_dfs_bypass = true; + + for (i = 0; i < ps->num_levels; i++) { + if (ps->levels[i].sclk < sclk) + ps->levels[i].sclk = sclk; + } + + if (table && table->count) { + for (i = 0; i < ps->num_levels; i++) { + if (pi->high_voltage_t && + (pi->high_voltage_t < + kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) { + kv_get_high_voltage_limit(adev, &limit); + ps->levels[i].sclk = table->entries[limit].clk; + } + } + } else { + struct sumo_sclk_voltage_mapping_table *table = + &pi->sys_info.sclk_voltage_mapping_table; + + for (i = 0; i < ps->num_levels; i++) { + if (pi->high_voltage_t && + (pi->high_voltage_t < + kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) { + kv_get_high_voltage_limit(adev, &limit); + ps->levels[i].sclk = table->entries[limit].sclk_frequency; + } + } + } + + if (pi->caps_stable_p_state) { + for (i = 0; i < ps->num_levels; i++) { + ps->levels[i].sclk = stable_p_state_sclk; + } + } + + pi->video_start = new_rps->dclk || new_rps->vclk || + new_rps->evclk || new_rps->ecclk; + + if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == + ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) + pi->battery_state = true; + else + pi->battery_state = false; + + if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { + ps->dpm0_pg_nb_ps_lo = 0x1; + ps->dpm0_pg_nb_ps_hi = 0x0; + ps->dpmx_nb_ps_lo = 0x1; + ps->dpmx_nb_ps_hi = 0x0; + } else { + ps->dpm0_pg_nb_ps_lo = 0x3; + ps->dpm0_pg_nb_ps_hi = 0x0; + ps->dpmx_nb_ps_lo = 0x3; + ps->dpmx_nb_ps_hi = 0x0; + + if (pi->sys_info.nb_dpm_enable) { + force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) || + pi->video_start || (adev->pm.dpm.new_active_crtc_count >= 3) || + pi->disable_nb_ps3_in_battery; + ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3; + ps->dpm0_pg_nb_ps_hi = 0x2; + ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3; + ps->dpmx_nb_ps_hi = 0x2; + } + } +} + +static void kv_dpm_power_level_enabled_for_throttle(struct amdgpu_device *adev, + u32 index, bool enable) +{ + struct kv_power_info *pi = kv_get_pi(adev); + + pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0; +} + +static int kv_calculate_ds_divider(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + u32 sclk_in_sr = 10000; /* ??? 
*/ + u32 i; + + if (pi->lowest_valid > pi->highest_valid) + return -EINVAL; + + for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { + pi->graphics_level[i].DeepSleepDivId = + kv_get_sleep_divider_id_from_clock(adev, + be32_to_cpu(pi->graphics_level[i].SclkFrequency), + sclk_in_sr); + } + return 0; +} + +static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + u32 i; + bool force_high; + struct amdgpu_clock_and_voltage_limits *max_limits = + &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; + u32 mclk = max_limits->mclk; + + if (pi->lowest_valid > pi->highest_valid) + return -EINVAL; + + if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { + for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { + pi->graphics_level[i].GnbSlow = 1; + pi->graphics_level[i].ForceNbPs1 = 0; + pi->graphics_level[i].UpH = 0; + } + + if (!pi->sys_info.nb_dpm_enable) + return 0; + + force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) || + (adev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start); + + if (force_high) { + for (i = pi->lowest_valid; i <= pi->highest_valid; i++) + pi->graphics_level[i].GnbSlow = 0; + } else { + if (pi->battery_state) + pi->graphics_level[0].ForceNbPs1 = 1; + + pi->graphics_level[1].GnbSlow = 0; + pi->graphics_level[2].GnbSlow = 0; + pi->graphics_level[3].GnbSlow = 0; + pi->graphics_level[4].GnbSlow = 0; + } + } else { + for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { + pi->graphics_level[i].GnbSlow = 1; + pi->graphics_level[i].ForceNbPs1 = 0; + pi->graphics_level[i].UpH = 0; + } + + if (pi->sys_info.nb_dpm_enable && pi->battery_state) { + pi->graphics_level[pi->lowest_valid].UpH = 0x28; + pi->graphics_level[pi->lowest_valid].GnbSlow = 0; + if (pi->lowest_valid != pi->highest_valid) + pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1; + } + } + return 0; +} + +static int kv_calculate_dpm_settings(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + u32 i; + + if (pi->lowest_valid > pi->highest_valid) + return -EINVAL; + + for (i = pi->lowest_valid; i <= pi->highest_valid; i++) + pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 
1 : 0; + + return 0; +} + +static void kv_init_graphics_levels(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + u32 i; + struct amdgpu_clock_voltage_dependency_table *table = + &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; + + if (table && table->count) { + u32 vid_2bit; + + pi->graphics_dpm_level_count = 0; + for (i = 0; i < table->count; i++) { + if (pi->high_voltage_t && + (pi->high_voltage_t < + kv_convert_8bit_index_to_voltage(adev, table->entries[i].v))) + break; + + kv_set_divider_value(adev, i, table->entries[i].clk); + vid_2bit = kv_convert_vid7_to_vid2(adev, + &pi->sys_info.vid_mapping_table, + table->entries[i].v); + kv_set_vid(adev, i, vid_2bit); + kv_set_at(adev, i, pi->at[i]); + kv_dpm_power_level_enabled_for_throttle(adev, i, true); + pi->graphics_dpm_level_count++; + } + } else { + struct sumo_sclk_voltage_mapping_table *table = + &pi->sys_info.sclk_voltage_mapping_table; + + pi->graphics_dpm_level_count = 0; + for (i = 0; i < table->num_max_dpm_entries; i++) { + if (pi->high_voltage_t && + pi->high_voltage_t < + kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit)) + break; + + kv_set_divider_value(adev, i, table->entries[i].sclk_frequency); + kv_set_vid(adev, i, table->entries[i].vid_2bit); + kv_set_at(adev, i, pi->at[i]); + kv_dpm_power_level_enabled_for_throttle(adev, i, true); + pi->graphics_dpm_level_count++; + } + } + + for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) + kv_dpm_power_level_enable(adev, i, false); +} + +static void kv_enable_new_levels(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + u32 i; + + for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) { + if (i >= pi->lowest_valid && i <= pi->highest_valid) + kv_dpm_power_level_enable(adev, i, true); + } +} + +static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level) +{ + u32 new_mask = (1 << level); + + return amdgpu_kv_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_SCLKDPM_SetEnabledMask, + new_mask); +} + +static int kv_set_enabled_levels(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + u32 i, new_mask = 0; + + for (i = pi->lowest_valid; i <= pi->highest_valid; i++) + new_mask |= (1 << i); + + return amdgpu_kv_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_SCLKDPM_SetEnabledMask, + new_mask); +} + +static void kv_program_nbps_index_settings(struct amdgpu_device *adev, + struct amdgpu_ps *new_rps) +{ + struct kv_ps *new_ps = kv_get_ps(new_rps); + struct kv_power_info *pi = kv_get_pi(adev); + u32 nbdpmconfig1; + + if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) + return; + + if (pi->sys_info.nb_dpm_enable) { + nbdpmconfig1 = RREG32_SMC(ixNB_DPM_CONFIG_1); + nbdpmconfig1 &= ~(NB_DPM_CONFIG_1__Dpm0PgNbPsLo_MASK | + NB_DPM_CONFIG_1__Dpm0PgNbPsHi_MASK | + NB_DPM_CONFIG_1__DpmXNbPsLo_MASK | + NB_DPM_CONFIG_1__DpmXNbPsHi_MASK); + nbdpmconfig1 |= (new_ps->dpm0_pg_nb_ps_lo << NB_DPM_CONFIG_1__Dpm0PgNbPsLo__SHIFT) | + (new_ps->dpm0_pg_nb_ps_hi << NB_DPM_CONFIG_1__Dpm0PgNbPsHi__SHIFT) | + (new_ps->dpmx_nb_ps_lo << NB_DPM_CONFIG_1__DpmXNbPsLo__SHIFT) | + (new_ps->dpmx_nb_ps_hi << NB_DPM_CONFIG_1__DpmXNbPsHi__SHIFT); + WREG32_SMC(ixNB_DPM_CONFIG_1, nbdpmconfig1); + } +} + +static int kv_set_thermal_temperature_range(struct amdgpu_device *adev, + int min_temp, int max_temp) +{ + int low_temp = 0 * 1000; + int high_temp = 255 * 1000; + u32 tmp; + + if (low_temp < min_temp) + low_temp = min_temp; + if (high_temp > max_temp) + high_temp = max_temp; + if (high_temp < low_temp) { + 
DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp); + return -EINVAL; + } + + tmp = RREG32_SMC(ixCG_THERMAL_INT_CTRL); + tmp &= ~(CG_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK | + CG_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK); + tmp |= ((49 + (high_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT) | + ((49 + (low_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT); + WREG32_SMC(ixCG_THERMAL_INT_CTRL, tmp); + + adev->pm.dpm.thermal.min_temp = low_temp; + adev->pm.dpm.thermal.max_temp = high_temp; + + return 0; +} + +union igp_info { + struct _ATOM_INTEGRATED_SYSTEM_INFO info; + struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2; + struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5; + struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6; + struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7; + struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8; +}; + +static int kv_parse_sys_info_table(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + struct amdgpu_mode_info *mode_info = &adev->mode_info; + int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); + union igp_info *igp_info; + u8 frev, crev; + u16 data_offset; + int i; + + if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) { + igp_info = (union igp_info *)(mode_info->atom_context->bios + + data_offset); + + if (crev != 8) { + DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev); + return -EINVAL; + } + pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock); + pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock); + pi->sys_info.bootup_nb_voltage_index = + le16_to_cpu(igp_info->info_8.usBootUpNBVoltage); + if (igp_info->info_8.ucHtcTmpLmt == 0) + pi->sys_info.htc_tmp_lmt = 203; + else + pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt; + if (igp_info->info_8.ucHtcHystLmt == 0) + pi->sys_info.htc_hyst_lmt = 5; + else + pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt; + if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) { + DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n"); + } + + if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3)) + pi->sys_info.nb_dpm_enable = true; + else + pi->sys_info.nb_dpm_enable = false; + + for (i = 0; i < KV_NUM_NBPSTATES; i++) { + pi->sys_info.nbp_memory_clock[i] = + le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]); + pi->sys_info.nbp_n_clock[i] = + le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]); + } + if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) & + SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS) + pi->caps_enable_dfs_bypass = true; + + sumo_construct_sclk_voltage_mapping_table(adev, + &pi->sys_info.sclk_voltage_mapping_table, + igp_info->info_8.sAvail_SCLK); + + sumo_construct_vid_mapping_table(adev, + &pi->sys_info.vid_mapping_table, + igp_info->info_8.sAvail_SCLK); + + kv_construct_max_power_limits_table(adev, + &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac); + } + return 0; +} + +union power_info { + struct _ATOM_POWERPLAY_INFO info; + struct _ATOM_POWERPLAY_INFO_V2 info_2; + struct _ATOM_POWERPLAY_INFO_V3 info_3; + struct _ATOM_PPLIB_POWERPLAYTABLE pplib; + struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; + struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; +}; + +union pplib_clock_info { + struct _ATOM_PPLIB_R600_CLOCK_INFO r600; + struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780; + struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen; + struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo; +}; + +union pplib_power_state { + struct _ATOM_PPLIB_STATE v1; + 
struct _ATOM_PPLIB_STATE_V2 v2; +}; + +static void kv_patch_boot_state(struct amdgpu_device *adev, + struct kv_ps *ps) +{ + struct kv_power_info *pi = kv_get_pi(adev); + + ps->num_levels = 1; + ps->levels[0] = pi->boot_pl; +} + +static void kv_parse_pplib_non_clock_info(struct amdgpu_device *adev, + struct amdgpu_ps *rps, + struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info, + u8 table_rev) +{ + struct kv_ps *ps = kv_get_ps(rps); + + rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings); + rps->class = le16_to_cpu(non_clock_info->usClassification); + rps->class2 = le16_to_cpu(non_clock_info->usClassification2); + + if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { + rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); + rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); + } else { + rps->vclk = 0; + rps->dclk = 0; + } + + if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) { + adev->pm.dpm.boot_ps = rps; + kv_patch_boot_state(adev, ps); + } + if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) + adev->pm.dpm.uvd_ps = rps; +} + +static void kv_parse_pplib_clock_info(struct amdgpu_device *adev, + struct amdgpu_ps *rps, int index, + union pplib_clock_info *clock_info) +{ + struct kv_power_info *pi = kv_get_pi(adev); + struct kv_ps *ps = kv_get_ps(rps); + struct kv_pl *pl = &ps->levels[index]; + u32 sclk; + + sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow); + sclk |= clock_info->sumo.ucEngineClockHigh << 16; + pl->sclk = sclk; + pl->vddc_index = clock_info->sumo.vddcIndex; + + ps->num_levels = index + 1; + + if (pi->caps_sclk_ds) { + pl->ds_divider_index = 5; + pl->ss_divider_index = 5; + } +} + +static int kv_parse_power_table(struct amdgpu_device *adev) +{ + struct amdgpu_mode_info *mode_info = &adev->mode_info; + struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; + union pplib_power_state *power_state; + int i, j, k, non_clock_array_index, clock_array_index; + union pplib_clock_info *clock_info; + struct _StateArray *state_array; + struct _ClockInfoArray *clock_info_array; + struct _NonClockInfoArray *non_clock_info_array; + union power_info *power_info; + int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); + u16 data_offset; + u8 frev, crev; + u8 *power_state_offset; + struct kv_ps *ps; + + if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) + return -EINVAL; + power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); + + amdgpu_add_thermal_controller(adev); + + state_array = (struct _StateArray *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib.usStateArrayOffset)); + clock_info_array = (struct _ClockInfoArray *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib.usClockInfoArrayOffset)); + non_clock_info_array = (struct _NonClockInfoArray *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); + + adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) * + state_array->ucNumEntries, GFP_KERNEL); + if (!adev->pm.dpm.ps) + return -ENOMEM; + power_state_offset = (u8 *)state_array->states; + for (i = 0; i < state_array->ucNumEntries; i++) { + u8 *idx; + power_state = (union pplib_power_state *)power_state_offset; + non_clock_array_index = power_state->v2.nonClockInfoIndex; + non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) + &non_clock_info_array->nonClockInfo[non_clock_array_index]; + ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL); + if (ps == NULL) { + kfree(adev->pm.dpm.ps); + return -ENOMEM; + 
} + adev->pm.dpm.ps[i].ps_priv = ps; + k = 0; + idx = (u8 *)&power_state->v2.clockInfoIndex[0]; + for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { + clock_array_index = idx[j]; + if (clock_array_index >= clock_info_array->ucNumEntries) + continue; + if (k >= SUMO_MAX_HARDWARE_POWERLEVELS) + break; + clock_info = (union pplib_clock_info *) + ((u8 *)&clock_info_array->clockInfo[0] + + (clock_array_index * clock_info_array->ucEntrySize)); + kv_parse_pplib_clock_info(adev, + &adev->pm.dpm.ps[i], k, + clock_info); + k++; + } + kv_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i], + non_clock_info, + non_clock_info_array->ucEntrySize); + power_state_offset += 2 + power_state->v2.ucNumDPMLevels; + } + adev->pm.dpm.num_ps = state_array->ucNumEntries; + + /* fill in the vce power states */ + for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) { + u32 sclk; + clock_array_index = adev->pm.dpm.vce_states[i].clk_idx; + clock_info = (union pplib_clock_info *) + &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; + sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow); + sclk |= clock_info->sumo.ucEngineClockHigh << 16; + adev->pm.dpm.vce_states[i].sclk = sclk; + adev->pm.dpm.vce_states[i].mclk = 0; + } + + return 0; +} + +static int kv_dpm_init(struct amdgpu_device *adev) +{ + struct kv_power_info *pi; + int ret, i; + + pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL); + if (pi == NULL) + return -ENOMEM; + adev->pm.dpm.priv = pi; + + ret = amdgpu_get_platform_caps(adev); + if (ret) + return ret; + + ret = amdgpu_parse_extended_power_table(adev); + if (ret) + return ret; + + for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) + pi->at[i] = TRINITY_AT_DFLT; + + pi->sram_end = SMC_RAM_END; + + pi->enable_nb_dpm = true; + + pi->caps_power_containment = true; + pi->caps_cac = true; + pi->enable_didt = false; + if (pi->enable_didt) { + pi->caps_sq_ramping = true; + pi->caps_db_ramping = true; + pi->caps_td_ramping = true; + pi->caps_tcp_ramping = true; + } + + pi->caps_sclk_ds = true; + pi->enable_auto_thermal_throttling = true; + pi->disable_nb_ps3_in_battery = false; + if (amdgpu_bapm == 0) + pi->bapm_enable = false; + else + pi->bapm_enable = true; + pi->voltage_drop_t = 0; + pi->caps_sclk_throttle_low_notification = false; + pi->caps_fps = false; /* true? */ + pi->caps_uvd_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_UVD) ? true : false; + pi->caps_uvd_dpm = true; + pi->caps_vce_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_VCE) ? true : false; + pi->caps_samu_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_SAMU) ? true : false; + pi->caps_acp_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_ACP) ? 
true : false; + pi->caps_stable_p_state = false; + + ret = kv_parse_sys_info_table(adev); + if (ret) + return ret; + + kv_patch_voltage_values(adev); + kv_construct_boot_state(adev); + + ret = kv_parse_power_table(adev); + if (ret) + return ret; + + pi->enable_dpm = true; + + return 0; +} + +static void +kv_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev, + struct seq_file *m) +{ + struct kv_power_info *pi = kv_get_pi(adev); + u32 current_index = + (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) & + TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >> + TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT; + u32 sclk, tmp; + u16 vddc; + + if (current_index >= SMU__NUM_SCLK_DPM_STATE) { + seq_printf(m, "invalid dpm profile %d\n", current_index); + } else { + sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency); + tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) & + SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >> + SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT; + vddc = kv_convert_8bit_index_to_voltage(adev, (u16)tmp); + seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en"); + seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en"); + seq_printf(m, "power level %d sclk: %u vddc: %u\n", + current_index, sclk, vddc); + } +} + +static void +kv_dpm_print_power_state(struct amdgpu_device *adev, + struct amdgpu_ps *rps) +{ + int i; + struct kv_ps *ps = kv_get_ps(rps); + + amdgpu_dpm_print_class_info(rps->class, rps->class2); + amdgpu_dpm_print_cap_info(rps->caps); + printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); + for (i = 0; i < ps->num_levels; i++) { + struct kv_pl *pl = &ps->levels[i]; + printk("\t\tpower level %d sclk: %u vddc: %u\n", + i, pl->sclk, + kv_convert_8bit_index_to_voltage(adev, pl->vddc_index)); + } + amdgpu_dpm_print_ps_status(adev, rps); +} + +static void kv_dpm_fini(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < adev->pm.dpm.num_ps; i++) { + kfree(adev->pm.dpm.ps[i].ps_priv); + } + kfree(adev->pm.dpm.ps); + kfree(adev->pm.dpm.priv); + amdgpu_free_extended_power_table(adev); +} + +static void kv_dpm_display_configuration_changed(struct amdgpu_device *adev) +{ + +} + +static u32 kv_dpm_get_sclk(struct amdgpu_device *adev, bool low) +{ + struct kv_power_info *pi = kv_get_pi(adev); + struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps); + + if (low) + return requested_state->levels[0].sclk; + else + return requested_state->levels[requested_state->num_levels - 1].sclk; +} + +static u32 kv_dpm_get_mclk(struct amdgpu_device *adev, bool low) +{ + struct kv_power_info *pi = kv_get_pi(adev); + + return pi->sys_info.bootup_uma_clk; +} + +/* get temperature in millidegrees */ +static int kv_dpm_get_temp(struct amdgpu_device *adev) +{ + u32 temp; + int actual_temp = 0; + + temp = RREG32_SMC(0xC0300E0C); + + if (temp) + actual_temp = (temp / 8) - 49; + else + actual_temp = 0; + + actual_temp = actual_temp * 1000; + + return actual_temp; +} + +static int kv_dpm_early_init(struct amdgpu_device *adev) +{ + kv_dpm_set_dpm_funcs(adev); + kv_dpm_set_irq_funcs(adev); + + return 0; +} + +static int kv_dpm_late_init(struct amdgpu_device *adev) +{ + if (!amdgpu_dpm) + return 0; + + /* powerdown unused blocks for now */ + kv_dpm_powergate_acp(adev, true); + kv_dpm_powergate_samu(adev, true); + kv_dpm_powergate_vce(adev, true); + kv_dpm_powergate_uvd(adev, true); + + return 0; +} + +static int kv_dpm_sw_init(struct amdgpu_device *adev) +{ + int ret; + + ret = amdgpu_irq_add_id(adev, 230, 
&adev->pm.dpm.thermal.irq); + if (ret) + return ret; + + ret = amdgpu_irq_add_id(adev, 231, &adev->pm.dpm.thermal.irq); + if (ret) + return ret; + + /* default to balanced state */ + adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED; + adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; + adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO; + adev->pm.default_sclk = adev->clock.default_sclk; + adev->pm.default_mclk = adev->clock.default_mclk; + adev->pm.current_sclk = adev->clock.default_sclk; + adev->pm.current_mclk = adev->clock.default_mclk; + adev->pm.int_thermal_type = THERMAL_TYPE_NONE; + + if (amdgpu_dpm == 0) + return 0; + + INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler); + mutex_lock(&adev->pm.mutex); + ret = kv_dpm_init(adev); + if (ret) + goto dpm_failed; + adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; + if (amdgpu_dpm == 1) + amdgpu_pm_print_power_states(adev); + ret = amdgpu_pm_sysfs_init(adev); + if (ret) + goto dpm_failed; + mutex_unlock(&adev->pm.mutex); + DRM_INFO("amdgpu: dpm initialized\n"); + + return 0; + +dpm_failed: + kv_dpm_fini(adev); + mutex_unlock(&adev->pm.mutex); + DRM_ERROR("amdgpu: dpm initialization failed\n"); + return ret; +} + +static int kv_dpm_sw_fini(struct amdgpu_device *adev) +{ + mutex_lock(&adev->pm.mutex); + amdgpu_pm_sysfs_fini(adev); + kv_dpm_fini(adev); + mutex_unlock(&adev->pm.mutex); + + return 0; +} + +static int kv_dpm_hw_init(struct amdgpu_device *adev) +{ + int ret; + + if (!amdgpu_dpm) + return 0; + + mutex_lock(&adev->pm.mutex); + kv_dpm_setup_asic(adev); + ret = kv_dpm_enable(adev); + if (ret) + adev->pm.dpm_enabled = false; + else + adev->pm.dpm_enabled = true; + mutex_unlock(&adev->pm.mutex); + + return ret; +} + +static int kv_dpm_hw_fini(struct amdgpu_device *adev) +{ + if (adev->pm.dpm_enabled) { + mutex_lock(&adev->pm.mutex); + kv_dpm_disable(adev); + mutex_unlock(&adev->pm.mutex); + } + + return 0; +} + +static int kv_dpm_suspend(struct amdgpu_device *adev) +{ + if (adev->pm.dpm_enabled) { + mutex_lock(&adev->pm.mutex); + /* disable dpm */ + kv_dpm_disable(adev); + /* reset the power state */ + adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; + mutex_unlock(&adev->pm.mutex); + } + return 0; +} + +static int kv_dpm_resume(struct amdgpu_device *adev) +{ + int ret; + + if (adev->pm.dpm_enabled) { + /* asic init will reset to the boot state */ + mutex_lock(&adev->pm.mutex); + kv_dpm_setup_asic(adev); + ret = kv_dpm_enable(adev); + if (ret) + adev->pm.dpm_enabled = false; + else + adev->pm.dpm_enabled = true; + mutex_unlock(&adev->pm.mutex); + if (adev->pm.dpm_enabled) + amdgpu_pm_compute_clocks(adev); + } + return 0; +} + +static bool kv_dpm_is_idle(struct amdgpu_device *adev) +{ + /* XXX */ + return true; +} + +static int kv_dpm_wait_for_idle(struct amdgpu_device *adev) +{ + /* XXX */ + return 0; +} + +static void kv_dpm_print_status(struct amdgpu_device *adev) +{ + dev_info(adev->dev, "KV/KB DPM registers\n"); + dev_info(adev->dev, " DIDT_SQ_CTRL0=0x%08X\n", + RREG32_DIDT(ixDIDT_SQ_CTRL0)); + dev_info(adev->dev, " DIDT_DB_CTRL0=0x%08X\n", + RREG32_DIDT(ixDIDT_DB_CTRL0)); + dev_info(adev->dev, " DIDT_TD_CTRL0=0x%08X\n", + RREG32_DIDT(ixDIDT_TD_CTRL0)); + dev_info(adev->dev, " DIDT_TCP_CTRL0=0x%08X\n", + RREG32_DIDT(ixDIDT_TCP_CTRL0)); + dev_info(adev->dev, " LCAC_SX0_OVR_SEL=0x%08X\n", + RREG32_SMC(ixLCAC_SX0_OVR_SEL)); + dev_info(adev->dev, " LCAC_SX0_OVR_VAL=0x%08X\n", + RREG32_SMC(ixLCAC_SX0_OVR_VAL)); + dev_info(adev->dev, " 
LCAC_MC0_OVR_SEL=0x%08X\n", + RREG32_SMC(ixLCAC_MC0_OVR_SEL)); + dev_info(adev->dev, " LCAC_MC0_OVR_VAL=0x%08X\n", + RREG32_SMC(ixLCAC_MC0_OVR_VAL)); + dev_info(adev->dev, " LCAC_MC1_OVR_SEL=0x%08X\n", + RREG32_SMC(ixLCAC_MC1_OVR_SEL)); + dev_info(adev->dev, " LCAC_MC1_OVR_VAL=0x%08X\n", + RREG32_SMC(ixLCAC_MC1_OVR_VAL)); + dev_info(adev->dev, " LCAC_MC2_OVR_SEL=0x%08X\n", + RREG32_SMC(ixLCAC_MC2_OVR_SEL)); + dev_info(adev->dev, " LCAC_MC2_OVR_VAL=0x%08X\n", + RREG32_SMC(ixLCAC_MC2_OVR_VAL)); + dev_info(adev->dev, " LCAC_MC3_OVR_SEL=0x%08X\n", + RREG32_SMC(ixLCAC_MC3_OVR_SEL)); + dev_info(adev->dev, " LCAC_MC3_OVR_VAL=0x%08X\n", + RREG32_SMC(ixLCAC_MC3_OVR_VAL)); + dev_info(adev->dev, " LCAC_CPL_OVR_SEL=0x%08X\n", + RREG32_SMC(ixLCAC_CPL_OVR_SEL)); + dev_info(adev->dev, " LCAC_CPL_OVR_VAL=0x%08X\n", + RREG32_SMC(ixLCAC_CPL_OVR_VAL)); + dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_0=0x%08X\n", + RREG32_SMC(ixCG_FREQ_TRAN_VOTING_0)); + dev_info(adev->dev, " GENERAL_PWRMGT=0x%08X\n", + RREG32_SMC(ixGENERAL_PWRMGT)); + dev_info(adev->dev, " SCLK_PWRMGT_CNTL=0x%08X\n", + RREG32_SMC(ixSCLK_PWRMGT_CNTL)); + dev_info(adev->dev, " SMC_MESSAGE_0=0x%08X\n", + RREG32(mmSMC_MESSAGE_0)); + dev_info(adev->dev, " SMC_RESP_0=0x%08X\n", + RREG32(mmSMC_RESP_0)); + dev_info(adev->dev, " SMC_MSG_ARG_0=0x%08X\n", + RREG32(mmSMC_MSG_ARG_0)); + dev_info(adev->dev, " SMC_IND_INDEX_0=0x%08X\n", + RREG32(mmSMC_IND_INDEX_0)); + dev_info(adev->dev, " SMC_IND_DATA_0=0x%08X\n", + RREG32(mmSMC_IND_DATA_0)); + dev_info(adev->dev, " SMC_IND_ACCESS_CNTL=0x%08X\n", + RREG32(mmSMC_IND_ACCESS_CNTL)); +} + +static int kv_dpm_soft_reset(struct amdgpu_device *adev) +{ + return 0; +} + +static int kv_dpm_set_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned type, + enum amdgpu_interrupt_state state) +{ + u32 cg_thermal_int; + + switch (type) { + case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH: + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL); + cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; + WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int); + break; + case AMDGPU_IRQ_STATE_ENABLE: + cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL); + cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; + WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int); + break; + default: + break; + } + break; + + case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW: + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL); + cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; + WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int); + break; + case AMDGPU_IRQ_STATE_ENABLE: + cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL); + cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; + WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int); + break; + default: + break; + } + break; + + default: + break; + } + return 0; +} + +static int kv_dpm_process_interrupt(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + bool queue_thermal = false; + + if (entry == NULL) + return -EINVAL; + + switch (entry->src_id) { + case 230: /* thermal low to high */ + DRM_DEBUG("IH: thermal low to high\n"); + adev->pm.dpm.thermal.high_to_low = false; + queue_thermal = true; + break; + case 231: /* thermal high to low */ + DRM_DEBUG("IH: thermal high to low\n"); + adev->pm.dpm.thermal.high_to_low = true; + queue_thermal = true; + break; + default: + break; + } + + if (queue_thermal) + 
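/* hand off to the dpm thermal work handler initialized in kv_dpm_sw_init */ + 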
schedule_work(&adev->pm.dpm.thermal.work); + + return 0; +} + +static int kv_dpm_set_clockgating_state(struct amdgpu_device *adev, + enum amdgpu_clockgating_state state) +{ + return 0; +} + +static int kv_dpm_set_powergating_state(struct amdgpu_device *adev, + enum amdgpu_powergating_state state) +{ + return 0; +} + +const struct amdgpu_ip_funcs kv_dpm_ip_funcs = { + .early_init = kv_dpm_early_init, + .late_init = kv_dpm_late_init, + .sw_init = kv_dpm_sw_init, + .sw_fini = kv_dpm_sw_fini, + .hw_init = kv_dpm_hw_init, + .hw_fini = kv_dpm_hw_fini, + .suspend = kv_dpm_suspend, + .resume = kv_dpm_resume, + .is_idle = kv_dpm_is_idle, + .wait_for_idle = kv_dpm_wait_for_idle, + .soft_reset = kv_dpm_soft_reset, + .print_status = kv_dpm_print_status, + .set_clockgating_state = kv_dpm_set_clockgating_state, + .set_powergating_state = kv_dpm_set_powergating_state, +}; + +static const struct amdgpu_dpm_funcs kv_dpm_funcs = { + .get_temperature = &kv_dpm_get_temp, + .pre_set_power_state = &kv_dpm_pre_set_power_state, + .set_power_state = &kv_dpm_set_power_state, + .post_set_power_state = &kv_dpm_post_set_power_state, + .display_configuration_changed = &kv_dpm_display_configuration_changed, + .get_sclk = &kv_dpm_get_sclk, + .get_mclk = &kv_dpm_get_mclk, + .print_power_state = &kv_dpm_print_power_state, + .debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level, + .force_performance_level = &kv_dpm_force_performance_level, + .powergate_uvd = &kv_dpm_powergate_uvd, + .enable_bapm = &kv_dpm_enable_bapm, +}; + +static void kv_dpm_set_dpm_funcs(struct amdgpu_device *adev) +{ + if (adev->pm.funcs == NULL) + adev->pm.funcs = &kv_dpm_funcs; +} + +static const struct amdgpu_irq_src_funcs kv_dpm_irq_funcs = { + .set = kv_dpm_set_interrupt_state, + .process = kv_dpm_process_interrupt, +}; + +static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST; + adev->pm.dpm.thermal.irq.funcs = &kv_dpm_irq_funcs; +} diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.h b/drivers/gpu/drm/amd/amdgpu/kv_dpm.h new file mode 100644 index 000000000000..6df0ed41317c --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.h @@ -0,0 +1,229 @@ +/* + * Copyright 2013 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef __KV_DPM_H__ +#define __KV_DPM_H__ + +#define SMU__NUM_SCLK_DPM_STATE 8 +#define SMU__NUM_MCLK_DPM_LEVELS 4 +#define SMU__NUM_LCLK_DPM_LEVELS 8 +#define SMU__NUM_PCIE_DPM_LEVELS 0 /* ??? */ +#include "smu7_fusion.h" +#include "ppsmc.h" + +#define SUMO_MAX_HARDWARE_POWERLEVELS 5 + +#define SUMO_MAX_NUMBER_VOLTAGES 4 + +struct sumo_vid_mapping_entry { + u16 vid_2bit; + u16 vid_7bit; +}; + +struct sumo_vid_mapping_table { + u32 num_entries; + struct sumo_vid_mapping_entry entries[SUMO_MAX_NUMBER_VOLTAGES]; +}; + +struct sumo_sclk_voltage_mapping_entry { + u32 sclk_frequency; + u16 vid_2bit; + u16 rsv; +}; + +struct sumo_sclk_voltage_mapping_table { + u32 num_max_dpm_entries; + struct sumo_sclk_voltage_mapping_entry entries[SUMO_MAX_HARDWARE_POWERLEVELS]; +}; + +#define TRINITY_AT_DFLT 30 + +#define KV_NUM_NBPSTATES 4 + +enum kv_pt_config_reg_type { + KV_CONFIGREG_MMR = 0, + KV_CONFIGREG_SMC_IND, + KV_CONFIGREG_DIDT_IND, + KV_CONFIGREG_CACHE, + KV_CONFIGREG_MAX +}; + +struct kv_pt_config_reg { + u32 offset; + u32 mask; + u32 shift; + u32 value; + enum kv_pt_config_reg_type type; +}; + +struct kv_lcac_config_values { + u32 block_id; + u32 signal_id; + u32 t; +}; + +struct kv_lcac_config_reg { + u32 cntl; + u32 block_mask; + u32 block_shift; + u32 signal_mask; + u32 signal_shift; + u32 t_mask; + u32 t_shift; + u32 enable_mask; + u32 enable_shift; +}; + +struct kv_pl { + u32 sclk; + u8 vddc_index; + u8 ds_divider_index; + u8 ss_divider_index; + u8 allow_gnb_slow; + u8 force_nbp_state; + u8 display_wm; + u8 vce_wm; +}; + +struct kv_ps { + struct kv_pl levels[SUMO_MAX_HARDWARE_POWERLEVELS]; + u32 num_levels; + bool need_dfs_bypass; + u8 dpm0_pg_nb_ps_lo; + u8 dpm0_pg_nb_ps_hi; + u8 dpmx_nb_ps_lo; + u8 dpmx_nb_ps_hi; +}; + +struct kv_sys_info { + u32 bootup_uma_clk; + u32 bootup_sclk; + u32 dentist_vco_freq; + u32 nb_dpm_enable; + u32 nbp_memory_clock[KV_NUM_NBPSTATES]; + u32 nbp_n_clock[KV_NUM_NBPSTATES]; + u16 bootup_nb_voltage_index; + u8 htc_tmp_lmt; + u8 htc_hyst_lmt; + struct sumo_sclk_voltage_mapping_table sclk_voltage_mapping_table; + struct sumo_vid_mapping_table vid_mapping_table; + u32 uma_channel_number; +}; + +struct kv_power_info { + u32 at[SUMO_MAX_HARDWARE_POWERLEVELS]; + u32 voltage_drop_t; + struct kv_sys_info sys_info; + struct kv_pl boot_pl; + bool enable_nb_ps_policy; + bool disable_nb_ps3_in_battery; + bool video_start; + bool battery_state; + u32 lowest_valid; + u32 highest_valid; + u16 high_voltage_t; + bool cac_enabled; + bool bapm_enable; + /* smc offsets */ + u32 sram_end; + u32 dpm_table_start; + u32 soft_regs_start; + /* dpm SMU tables */ + u8 graphics_dpm_level_count; + u8 uvd_level_count; + u8 vce_level_count; + u8 acp_level_count; + u8 samu_level_count; + u16 fps_high_t; + SMU7_Fusion_GraphicsLevel graphics_level[SMU__NUM_SCLK_DPM_STATE]; + SMU7_Fusion_ACPILevel acpi_level; + SMU7_Fusion_UvdLevel uvd_level[SMU7_MAX_LEVELS_UVD]; + SMU7_Fusion_ExtClkLevel vce_level[SMU7_MAX_LEVELS_VCE]; + SMU7_Fusion_ExtClkLevel acp_level[SMU7_MAX_LEVELS_ACP]; + SMU7_Fusion_ExtClkLevel samu_level[SMU7_MAX_LEVELS_SAMU]; + u8 uvd_boot_level; + u8 vce_boot_level; + u8 acp_boot_level; + u8 samu_boot_level; + u8 uvd_interval; + u8 vce_interval; + u8 acp_interval; + u8 samu_interval; + u8 graphics_boot_level; + u8 graphics_interval; + u8 graphics_therm_throttle_enable; + u8 graphics_voltage_change_enable; + u8 graphics_clk_slow_enable; + u8 graphics_clk_slow_divider; + u8 fps_low_t; + u32 low_sclk_interrupt_t; + bool uvd_power_gated; + bool vce_power_gated; + bool 
acp_power_gated; + bool samu_power_gated; + bool nb_dpm_enabled; + /* flags */ + bool enable_didt; + bool enable_dpm; + bool enable_auto_thermal_throttling; + bool enable_nb_dpm; + /* caps */ + bool caps_cac; + bool caps_power_containment; + bool caps_sq_ramping; + bool caps_db_ramping; + bool caps_td_ramping; + bool caps_tcp_ramping; + bool caps_sclk_throttle_low_notification; + bool caps_fps; + bool caps_uvd_dpm; + bool caps_uvd_pg; + bool caps_vce_pg; + bool caps_samu_pg; + bool caps_acp_pg; + bool caps_stable_p_state; + bool caps_enable_dfs_bypass; + bool caps_sclk_ds; + struct amdgpu_ps current_rps; + struct kv_ps current_ps; + struct amdgpu_ps requested_rps; + struct kv_ps requested_ps; +}; + +/* XXX are these ok? */ +#define KV_TEMP_RANGE_MIN (90 * 1000) +#define KV_TEMP_RANGE_MAX (120 * 1000) + +/* kv_smc.c */ +int amdgpu_kv_notify_message_to_smu(struct amdgpu_device *adev, u32 id); +int amdgpu_kv_dpm_get_enable_mask(struct amdgpu_device *adev, u32 *enable_mask); +int amdgpu_kv_send_msg_to_smc_with_parameter(struct amdgpu_device *adev, + PPSMC_Msg msg, u32 parameter); +int amdgpu_kv_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address, + u32 *value, u32 limit); +int amdgpu_kv_smc_dpm_enable(struct amdgpu_device *adev, bool enable); +int amdgpu_kv_smc_bapm_enable(struct amdgpu_device *adev, bool enable); +int amdgpu_kv_copy_bytes_to_smc(struct amdgpu_device *adev, + u32 smc_start_address, + const u8 *src, u32 byte_count, u32 limit); + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/kv_smc.c b/drivers/gpu/drm/amd/amdgpu/kv_smc.c new file mode 100644 index 000000000000..e6b7b42acfe1 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/kv_smc.c @@ -0,0 +1,219 @@ +/* + * Copyright 2013 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Alex Deucher + */ + +#include "drmP.h" +#include "amdgpu.h" +#include "cikd.h" +#include "kv_dpm.h" + +#include "smu/smu_7_0_0_d.h" +#include "smu/smu_7_0_0_sh_mask.h" + +int amdgpu_kv_notify_message_to_smu(struct amdgpu_device *adev, u32 id) +{ + u32 i; + u32 tmp = 0; + + WREG32(mmSMC_MESSAGE_0, id & SMC_MESSAGE_0__SMC_MSG_MASK); + + for (i = 0; i < adev->usec_timeout; i++) { + if ((RREG32(mmSMC_RESP_0) & SMC_RESP_0__SMC_RESP_MASK) != 0) + break; + udelay(1); + } + tmp = RREG32(mmSMC_RESP_0) & SMC_RESP_0__SMC_RESP_MASK; + + if (tmp != 1) { + if (tmp == 0xFF) + return -EINVAL; + else if (tmp == 0xFE) + return -EINVAL; + } + + return 0; +} + +int amdgpu_kv_dpm_get_enable_mask(struct amdgpu_device *adev, u32 *enable_mask) +{ + int ret; + + ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SCLKDPM_GetEnabledMask); + + if (ret == 0) + *enable_mask = RREG32_SMC(ixSMC_SYSCON_MSG_ARG_0); + + return ret; +} + +int amdgpu_kv_send_msg_to_smc_with_parameter(struct amdgpu_device *adev, + PPSMC_Msg msg, u32 parameter) +{ + + WREG32(mmSMC_MSG_ARG_0, parameter); + + return amdgpu_kv_notify_message_to_smu(adev, msg); +} + +static int kv_set_smc_sram_address(struct amdgpu_device *adev, + u32 smc_address, u32 limit) +{ + if (smc_address & 3) + return -EINVAL; + if ((smc_address + 3) > limit) + return -EINVAL; + + WREG32(mmSMC_IND_INDEX_0, smc_address); + WREG32_P(mmSMC_IND_ACCESS_CNTL, 0, + ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK); + + return 0; +} + +int amdgpu_kv_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address, + u32 *value, u32 limit) +{ + int ret; + + ret = kv_set_smc_sram_address(adev, smc_address, limit); + if (ret) + return ret; + + *value = RREG32(mmSMC_IND_DATA_0); + return 0; +} + +int amdgpu_kv_smc_dpm_enable(struct amdgpu_device *adev, bool enable) +{ + if (enable) + return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DPM_Enable); + else + return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DPM_Disable); +} + +int amdgpu_kv_smc_bapm_enable(struct amdgpu_device *adev, bool enable) +{ + if (enable) + return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_EnableBAPM); + else + return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DisableBAPM); +} + +int amdgpu_kv_copy_bytes_to_smc(struct amdgpu_device *adev, + u32 smc_start_address, + const u8 *src, u32 byte_count, u32 limit) +{ + int ret; + u32 data, original_data, addr, extra_shift, t_byte, count, mask; + + if ((smc_start_address + byte_count) > limit) + return -EINVAL; + + addr = smc_start_address; + t_byte = addr & 3; + + /* RMW for the initial bytes */ + if (t_byte != 0) { + addr -= t_byte; + + ret = kv_set_smc_sram_address(adev, addr, limit); + if (ret) + return ret; + + original_data = RREG32(mmSMC_IND_DATA_0); + + data = 0; + mask = 0; + count = 4; + while (count > 0) { + if (t_byte > 0) { + mask = (mask << 8) | 0xff; + t_byte--; + } else if (byte_count > 0) { + data = (data << 8) + *src++; + byte_count--; + mask <<= 8; + } else { + data <<= 8; + mask = (mask << 8) | 0xff; + } + count--; + } + + data |= original_data & mask; + + ret = kv_set_smc_sram_address(adev, addr, limit); + if (ret) + return ret; + + WREG32(mmSMC_IND_DATA_0, data); + + addr += 4; + } + + while (byte_count >= 4) { + /* SMC address space is BE */ + data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3]; + + ret = kv_set_smc_sram_address(adev, addr, limit); + if (ret) + return ret; + + WREG32(mmSMC_IND_DATA_0, data); + + src += 4; + byte_count -= 4; + addr += 4; + } + + /* RMW for the final bytes */ + if 
(byte_count > 0) { + data = 0; + + ret = kv_set_smc_sram_address(adev, addr, limit); + if (ret) + return ret; + + original_data = RREG32(mmSMC_IND_DATA_0); + + extra_shift = 8 * (4 - byte_count); + + while (byte_count > 0) { + /* SMC address space is BE */ + data = (data << 8) + *src++; + byte_count--; + } + + data <<= extra_shift; + + data |= (original_data & ~((~0UL) << extra_shift)); + + ret = kv_set_smc_sram_address(adev, addr, limit); + if (ret) + return ret; + + WREG32(mmSMC_IND_DATA_0, data); + } + return 0; +} + diff --git a/drivers/gpu/drm/amd/amdgpu/smu7.h b/drivers/gpu/drm/amd/amdgpu/smu7.h new file mode 100644 index 000000000000..75a380a15292 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/smu7.h @@ -0,0 +1,170 @@ +/* + * Copyright 2013 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef SMU7_H +#define SMU7_H + +#pragma pack(push, 1) + +#define SMU7_CONTEXT_ID_SMC 1 +#define SMU7_CONTEXT_ID_VBIOS 2 + + +#define SMU7_CONTEXT_ID_SMC 1 +#define SMU7_CONTEXT_ID_VBIOS 2 + +#define SMU7_MAX_LEVELS_VDDC 8 +#define SMU7_MAX_LEVELS_VDDCI 4 +#define SMU7_MAX_LEVELS_MVDD 4 +#define SMU7_MAX_LEVELS_VDDNB 8 + +#define SMU7_MAX_LEVELS_GRAPHICS SMU__NUM_SCLK_DPM_STATE // SCLK + SQ DPM + ULV +#define SMU7_MAX_LEVELS_MEMORY SMU__NUM_MCLK_DPM_LEVELS // MCLK Levels DPM +#define SMU7_MAX_LEVELS_GIO SMU__NUM_LCLK_DPM_LEVELS // LCLK Levels +#define SMU7_MAX_LEVELS_LINK SMU__NUM_PCIE_DPM_LEVELS // PCIe speed and number of lanes. +#define SMU7_MAX_LEVELS_UVD 8 // VCLK/DCLK levels for UVD. +#define SMU7_MAX_LEVELS_VCE 8 // ECLK levels for VCE. +#define SMU7_MAX_LEVELS_ACP 8 // ACLK levels for ACP. +#define SMU7_MAX_LEVELS_SAMU 8 // SAMCLK levels for SAMU. +#define SMU7_MAX_ENTRIES_SMIO 32 // Number of entries in SMIO table. 
+ +#define DPM_NO_LIMIT 0 +#define DPM_NO_UP 1 +#define DPM_GO_DOWN 2 +#define DPM_GO_UP 3 + +#define SMU7_FIRST_DPM_GRAPHICS_LEVEL 0 +#define SMU7_FIRST_DPM_MEMORY_LEVEL 0 + +#define GPIO_CLAMP_MODE_VRHOT 1 +#define GPIO_CLAMP_MODE_THERM 2 +#define GPIO_CLAMP_MODE_DC 4 + +#define SCRATCH_B_TARG_PCIE_INDEX_SHIFT 0 +#define SCRATCH_B_TARG_PCIE_INDEX_MASK (0x7< + */ + +#include +#include +#include "amdgpu.h" +#include "amdgpu_uvd.h" +#include "cikd.h" + +#include "uvd/uvd_4_2_d.h" +#include "uvd/uvd_4_2_sh_mask.h" + +#include "oss/oss_2_0_d.h" +#include "oss/oss_2_0_sh_mask.h" + +static void uvd_v4_2_mc_resume(struct amdgpu_device *adev); +static void uvd_v4_2_init_cg(struct amdgpu_device *adev); +static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev); +static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev); +static int uvd_v4_2_start(struct amdgpu_device *adev); +static void uvd_v4_2_stop(struct amdgpu_device *adev); + +/** + * uvd_v4_2_ring_get_rptr - get read pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware read pointer + */ +static uint32_t uvd_v4_2_ring_get_rptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + return RREG32(mmUVD_RBC_RB_RPTR); +} + +/** + * uvd_v4_2_ring_get_wptr - get write pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware write pointer + */ +static uint32_t uvd_v4_2_ring_get_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + return RREG32(mmUVD_RBC_RB_WPTR); +} + +/** + * uvd_v4_2_ring_set_wptr - set write pointer + * + * @ring: amdgpu_ring pointer + * + * Commits the write pointer to the hardware + */ +static void uvd_v4_2_ring_set_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + WREG32(mmUVD_RBC_RB_WPTR, ring->wptr); +} + +static int uvd_v4_2_early_init(struct amdgpu_device *adev) +{ + uvd_v4_2_set_ring_funcs(adev); + uvd_v4_2_set_irq_funcs(adev); + + return 0; +} + +static int uvd_v4_2_sw_init(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring; + int r; + + /* UVD TRAP */ + r = amdgpu_irq_add_id(adev, 124, &adev->uvd.irq); + if (r) + return r; + + r = amdgpu_uvd_sw_init(adev); + if (r) + return r; + + r = amdgpu_uvd_resume(adev); + if (r) + return r; + + ring = &adev->uvd.ring; + sprintf(ring->name, "uvd"); + r = amdgpu_ring_init(adev, ring, 4096, CP_PACKET2, 0xf, + &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD); + + return r; +} + +static int uvd_v4_2_sw_fini(struct amdgpu_device *adev) +{ + int r; + + r = amdgpu_uvd_suspend(adev); + if (r) + return r; + + r = amdgpu_uvd_sw_fini(adev); + if (r) + return r; + + return r; +} + +/** + * uvd_v4_2_hw_init - start and test UVD block + * + * @adev: amdgpu_device pointer + * + * Initialize the hardware, boot up the VCPU and do some testing + */ +static int uvd_v4_2_hw_init(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring = &adev->uvd.ring; + uint32_t tmp; + int r; + + /* raise clocks while booting up the VCPU */ + amdgpu_asic_set_uvd_clocks(adev, 53300, 40000); + + r = uvd_v4_2_start(adev); + if (r) + goto done; + + ring->ready = true; + r = amdgpu_ring_test_ring(ring); + if (r) { + ring->ready = false; + goto done; + } + + r = amdgpu_ring_lock(ring, 10); + if (r) { + DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r); + goto done; + } + + tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0); + amdgpu_ring_write(ring, tmp); + amdgpu_ring_write(ring, 0xFFFFF); + + tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0); + 
amdgpu_ring_write(ring, tmp); + amdgpu_ring_write(ring, 0xFFFFF); + + tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0); + amdgpu_ring_write(ring, tmp); + amdgpu_ring_write(ring, 0xFFFFF); + + /* Clear timeout status bits */ + amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0)); + amdgpu_ring_write(ring, 0x8); + + amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0)); + amdgpu_ring_write(ring, 3); + + amdgpu_ring_unlock_commit(ring); + +done: + /* lower clocks again */ + amdgpu_asic_set_uvd_clocks(adev, 0, 0); + + if (!r) + DRM_INFO("UVD initialized successfully.\n"); + + return r; +} + +/** + * uvd_v4_2_hw_fini - stop the hardware block + * + * @adev: amdgpu_device pointer + * + * Stop the UVD block, mark ring as not ready any more + */ +static int uvd_v4_2_hw_fini(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring = &adev->uvd.ring; + + uvd_v4_2_stop(adev); + ring->ready = false; + + return 0; +} + +static int uvd_v4_2_suspend(struct amdgpu_device *adev) +{ + int r; + + r = uvd_v4_2_hw_fini(adev); + if (r) + return r; + + r = amdgpu_uvd_suspend(adev); + if (r) + return r; + + return r; +} + +static int uvd_v4_2_resume(struct amdgpu_device *adev) +{ + int r; + + r = amdgpu_uvd_resume(adev); + if (r) + return r; + + r = uvd_v4_2_hw_init(adev); + if (r) + return r; + + return r; +} + +/** + * uvd_v4_2_start - start UVD block + * + * @adev: amdgpu_device pointer + * + * Setup and start the UVD block + */ +static int uvd_v4_2_start(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring = &adev->uvd.ring; + uint32_t rb_bufsz; + int i, j, r; + + /* disable byte swapping */ + u32 lmi_swap_cntl = 0; + u32 mp_swap_cntl = 0; + + uvd_v4_2_mc_resume(adev); + + /* disable clock gating */ + WREG32(mmUVD_CGC_GATE, 0); + + /* disable interupt */ + WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1)); + + /* Stall UMC and register bus before resetting VCPU */ + WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); + mdelay(1); + + /* put LMI, VCPU, RBC etc... 
into reset */ + WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK | + UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK | + UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK | + UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK | + UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK); + mdelay(5); + + /* take UVD block out of reset */ + WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK); + mdelay(5); + + /* initialize UVD memory controller */ + WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) | + (1 << 21) | (1 << 9) | (1 << 20)); + +#ifdef __BIG_ENDIAN + /* swap (8 in 32) RB and IB */ + lmi_swap_cntl = 0xa; + mp_swap_cntl = 0; +#endif + WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl); + WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl); + + WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040); + WREG32(mmUVD_MPC_SET_MUXA1, 0x0); + WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040); + WREG32(mmUVD_MPC_SET_MUXB1, 0x0); + WREG32(mmUVD_MPC_SET_ALU, 0); + WREG32(mmUVD_MPC_SET_MUX, 0x88); + + /* take all subblocks out of reset, except VCPU */ + WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); + mdelay(5); + + /* enable VCPU clock */ + WREG32(mmUVD_VCPU_CNTL, 1 << 9); + + /* enable UMC */ + WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8)); + + /* boot up the VCPU */ + WREG32(mmUVD_SOFT_RESET, 0); + mdelay(10); + + for (i = 0; i < 10; ++i) { + uint32_t status; + for (j = 0; j < 100; ++j) { + status = RREG32(mmUVD_STATUS); + if (status & 2) + break; + mdelay(10); + } + r = 0; + if (status & 2) + break; + + DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n"); + WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK, + ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); + mdelay(10); + WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); + mdelay(10); + r = -1; + } + + if (r) { + DRM_ERROR("UVD not responding, giving up!!!\n"); + return r; + } + + /* enable interupt */ + WREG32_P(mmUVD_MASTINT_EN, 3<<1, ~(3 << 1)); + + /* force RBC into idle state */ + WREG32(mmUVD_RBC_RB_CNTL, 0x11010101); + + /* Set the write pointer delay */ + WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0); + + /* programm the 4GB memory segment for rptr and ring buffer */ + WREG32(mmUVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) | + (0x7 << 16) | (0x1 << 31)); + + /* Initialize the ring buffer's read and write pointers */ + WREG32(mmUVD_RBC_RB_RPTR, 0x0); + + ring->wptr = RREG32(mmUVD_RBC_RB_RPTR); + WREG32(mmUVD_RBC_RB_WPTR, ring->wptr); + + /* set the ring address */ + WREG32(mmUVD_RBC_RB_BASE, ring->gpu_addr); + + /* Set ring buffer size */ + rb_bufsz = order_base_2(ring->ring_size); + rb_bufsz = (0x1 << 8) | rb_bufsz; + WREG32_P(mmUVD_RBC_RB_CNTL, rb_bufsz, ~0x11f1f); + + return 0; +} + +/** + * uvd_v4_2_stop - stop UVD block + * + * @adev: amdgpu_device pointer + * + * stop the UVD block + */ +static void uvd_v4_2_stop(struct amdgpu_device *adev) +{ + /* force RBC into idle state */ + WREG32(mmUVD_RBC_RB_CNTL, 0x11010101); + + /* Stall UMC and register bus before resetting VCPU */ + WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); + mdelay(1); + + /* put VCPU into reset */ + WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); + mdelay(5); + + /* disable VCPU clock */ + WREG32(mmUVD_VCPU_CNTL, 0x0); + + /* Unstall UMC and register bus */ + WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8)); +} + +/** + * uvd_v4_2_ring_emit_fence - emit an fence & trap command + * + * @ring: amdgpu_ring pointer + * @fence: fence to emit + * + * Write a fence and a trap 
command to the ring. + */ +static void uvd_v4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, + bool write64bit) +{ + WARN_ON(write64bit); + + amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0)); + amdgpu_ring_write(ring, seq); + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0)); + amdgpu_ring_write(ring, addr & 0xffffffff); + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0)); + amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff); + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0)); + amdgpu_ring_write(ring, 0); + + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0)); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0)); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0)); + amdgpu_ring_write(ring, 2); +} + +/** + * uvd_v4_2_ring_emit_semaphore - emit semaphore command + * + * @ring: amdgpu_ring pointer + * @semaphore: semaphore to emit commands for + * @emit_wait: true if we should emit a wait command + * + * Emit a semaphore command (either wait or signal) to the UVD ring. + */ +static bool uvd_v4_2_ring_emit_semaphore(struct amdgpu_ring *ring, + struct amdgpu_semaphore *semaphore, + bool emit_wait) +{ + uint64_t addr = semaphore->gpu_addr; + + amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_LOW, 0)); + amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF); + + amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_HIGH, 0)); + amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF); + + amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CMD, 0)); + amdgpu_ring_write(ring, 0x80 | (emit_wait ? 1 : 0)); + + return true; +} + +/** + * uvd_v4_2_ring_test_ring - register write test + * + * @ring: amdgpu_ring pointer + * + * Test if we can successfully write to the context register + */ +static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + uint32_t tmp = 0; + unsigned i; + int r; + + WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD); + r = amdgpu_ring_lock(ring, 3); + if (r) { + DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", + ring->idx, r); + return r; + } + amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0)); + amdgpu_ring_write(ring, 0xDEADBEEF); + amdgpu_ring_unlock_commit(ring); + for (i = 0; i < adev->usec_timeout; i++) { + tmp = RREG32(mmUVD_CONTEXT_ID); + if (tmp == 0xDEADBEEF) + break; + DRM_UDELAY(1); + } + + if (i < adev->usec_timeout) { + DRM_INFO("ring test on %d succeeded in %d usecs\n", + ring->idx, i); + } else { + DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", + ring->idx, tmp); + r = -EINVAL; + } + return r; +} + +/** + * uvd_v4_2_ring_emit_ib - execute indirect buffer + * + * @ring: amdgpu_ring pointer + * @ib: indirect buffer to execute + * + * Write ring commands to execute the indirect buffer + */ +static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring, + struct amdgpu_ib *ib) +{ + amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0)); + amdgpu_ring_write(ring, ib->gpu_addr); + amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0)); + amdgpu_ring_write(ring, ib->length_dw); +} + +/** + * uvd_v4_2_ring_test_ib - test ib execution + * + * @ring: amdgpu_ring pointer + * + * Test if we can successfully execute an IB + */ +static int uvd_v4_2_ring_test_ib(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + struct amdgpu_fence *fence = NULL; + int r; + + r = amdgpu_asic_set_uvd_clocks(adev, 53300, 40000); + if (r) { + DRM_ERROR("amdgpu: failed to raise UVD clocks (%d).\n", r); + return r; + 
} + + r = amdgpu_uvd_get_create_msg(ring, 1, NULL); + if (r) { + DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r); + goto error; + } + + r = amdgpu_uvd_get_destroy_msg(ring, 1, &fence); + if (r) { + DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r); + goto error; + } + + r = amdgpu_fence_wait(fence, false); + if (r) { + DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); + goto error; + } + DRM_INFO("ib test on ring %d succeeded\n", ring->idx); +error: + amdgpu_fence_unref(&fence); + amdgpu_asic_set_uvd_clocks(adev, 0, 0); + return r; +} + +/** + * uvd_v4_2_mc_resume - memory controller programming + * + * @adev: amdgpu_device pointer + * + * Let the UVD memory controller know it's offsets + */ +static void uvd_v4_2_mc_resume(struct amdgpu_device *adev) +{ + uint64_t addr; + uint32_t size; + + /* programm the VCPU memory controller bits 0-27 */ + addr = (adev->uvd.gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3; + size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4) >> 3; + WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr); + WREG32(mmUVD_VCPU_CACHE_SIZE0, size); + + addr += size; + size = AMDGPU_UVD_STACK_SIZE >> 3; + WREG32(mmUVD_VCPU_CACHE_OFFSET1, addr); + WREG32(mmUVD_VCPU_CACHE_SIZE1, size); + + addr += size; + size = AMDGPU_UVD_HEAP_SIZE >> 3; + WREG32(mmUVD_VCPU_CACHE_OFFSET2, addr); + WREG32(mmUVD_VCPU_CACHE_SIZE2, size); + + /* bits 28-31 */ + addr = (adev->uvd.gpu_addr >> 28) & 0xF; + WREG32(mmUVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0)); + + /* bits 32-39 */ + addr = (adev->uvd.gpu_addr >> 32) & 0xFF; + WREG32(mmUVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31)); + + uvd_v4_2_init_cg(adev); +} + +static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev, + bool enable) +{ + u32 orig, data; + + if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_UVD_MGCG)) { + data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL); + data = 0xfff; + WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data); + + orig = data = RREG32(mmUVD_CGC_CTRL); + data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK; + if (orig != data) + WREG32(mmUVD_CGC_CTRL, data); + } else { + data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL); + data &= ~0xfff; + WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data); + + orig = data = RREG32(mmUVD_CGC_CTRL); + data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK; + if (orig != data) + WREG32(mmUVD_CGC_CTRL, data); + } +} + +static void uvd_v4_2_set_dcm(struct amdgpu_device *adev, + bool sw_mode) +{ + u32 tmp, tmp2; + + tmp = RREG32(mmUVD_CGC_CTRL); + tmp &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK | UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK); + tmp |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK | + (1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT) | + (4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT); + + if (sw_mode) { + tmp &= ~0x7ffff800; + tmp2 = UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN_MASK | + UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN_MASK | + (7 << UVD_CGC_CTRL2__GATER_DIV_ID__SHIFT); + } else { + tmp |= 0x7ffff800; + tmp2 = 0; + } + + WREG32(mmUVD_CGC_CTRL, tmp); + WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2); +} + +static void uvd_v4_2_init_cg(struct amdgpu_device *adev) +{ + bool hw_mode = true; + + if (hw_mode) { + uvd_v4_2_set_dcm(adev, false); + } else { + u32 tmp = RREG32(mmUVD_CGC_CTRL); + tmp &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK; + WREG32(mmUVD_CGC_CTRL, tmp); + } +} + +static bool uvd_v4_2_is_idle(struct amdgpu_device *adev) +{ + return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK); +} + +static int uvd_v4_2_wait_for_idle(struct amdgpu_device *adev) +{ + unsigned i; + + for (i = 0; i < adev->usec_timeout; i++) { + if (!(RREG32(mmSRBM_STATUS) & 
SRBM_STATUS__UVD_BUSY_MASK)) + return 0; + } + return -ETIMEDOUT; +} + +static int uvd_v4_2_soft_reset(struct amdgpu_device *adev) +{ + uvd_v4_2_stop(adev); + + WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK, + ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK); + mdelay(5); + + return uvd_v4_2_start(adev); +} + +static void uvd_v4_2_print_status(struct amdgpu_device *adev) +{ + dev_info(adev->dev, "UVD 4.2 registers\n"); + dev_info(adev->dev, " UVD_SEMA_ADDR_LOW=0x%08X\n", + RREG32(mmUVD_SEMA_ADDR_LOW)); + dev_info(adev->dev, " UVD_SEMA_ADDR_HIGH=0x%08X\n", + RREG32(mmUVD_SEMA_ADDR_HIGH)); + dev_info(adev->dev, " UVD_SEMA_CMD=0x%08X\n", + RREG32(mmUVD_SEMA_CMD)); + dev_info(adev->dev, " UVD_GPCOM_VCPU_CMD=0x%08X\n", + RREG32(mmUVD_GPCOM_VCPU_CMD)); + dev_info(adev->dev, " UVD_GPCOM_VCPU_DATA0=0x%08X\n", + RREG32(mmUVD_GPCOM_VCPU_DATA0)); + dev_info(adev->dev, " UVD_GPCOM_VCPU_DATA1=0x%08X\n", + RREG32(mmUVD_GPCOM_VCPU_DATA1)); + dev_info(adev->dev, " UVD_ENGINE_CNTL=0x%08X\n", + RREG32(mmUVD_ENGINE_CNTL)); + dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n", + RREG32(mmUVD_UDEC_ADDR_CONFIG)); + dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n", + RREG32(mmUVD_UDEC_DB_ADDR_CONFIG)); + dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n", + RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG)); + dev_info(adev->dev, " UVD_SEMA_CNTL=0x%08X\n", + RREG32(mmUVD_SEMA_CNTL)); + dev_info(adev->dev, " UVD_LMI_EXT40_ADDR=0x%08X\n", + RREG32(mmUVD_LMI_EXT40_ADDR)); + dev_info(adev->dev, " UVD_CTX_INDEX=0x%08X\n", + RREG32(mmUVD_CTX_INDEX)); + dev_info(adev->dev, " UVD_CTX_DATA=0x%08X\n", + RREG32(mmUVD_CTX_DATA)); + dev_info(adev->dev, " UVD_CGC_GATE=0x%08X\n", + RREG32(mmUVD_CGC_GATE)); + dev_info(adev->dev, " UVD_CGC_CTRL=0x%08X\n", + RREG32(mmUVD_CGC_CTRL)); + dev_info(adev->dev, " UVD_LMI_CTRL2=0x%08X\n", + RREG32(mmUVD_LMI_CTRL2)); + dev_info(adev->dev, " UVD_MASTINT_EN=0x%08X\n", + RREG32(mmUVD_MASTINT_EN)); + dev_info(adev->dev, " UVD_LMI_ADDR_EXT=0x%08X\n", + RREG32(mmUVD_LMI_ADDR_EXT)); + dev_info(adev->dev, " UVD_LMI_CTRL=0x%08X\n", + RREG32(mmUVD_LMI_CTRL)); + dev_info(adev->dev, " UVD_LMI_SWAP_CNTL=0x%08X\n", + RREG32(mmUVD_LMI_SWAP_CNTL)); + dev_info(adev->dev, " UVD_MP_SWAP_CNTL=0x%08X\n", + RREG32(mmUVD_MP_SWAP_CNTL)); + dev_info(adev->dev, " UVD_MPC_SET_MUXA0=0x%08X\n", + RREG32(mmUVD_MPC_SET_MUXA0)); + dev_info(adev->dev, " UVD_MPC_SET_MUXA1=0x%08X\n", + RREG32(mmUVD_MPC_SET_MUXA1)); + dev_info(adev->dev, " UVD_MPC_SET_MUXB0=0x%08X\n", + RREG32(mmUVD_MPC_SET_MUXB0)); + dev_info(adev->dev, " UVD_MPC_SET_MUXB1=0x%08X\n", + RREG32(mmUVD_MPC_SET_MUXB1)); + dev_info(adev->dev, " UVD_MPC_SET_MUX=0x%08X\n", + RREG32(mmUVD_MPC_SET_MUX)); + dev_info(adev->dev, " UVD_MPC_SET_ALU=0x%08X\n", + RREG32(mmUVD_MPC_SET_ALU)); + dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET0=0x%08X\n", + RREG32(mmUVD_VCPU_CACHE_OFFSET0)); + dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE0=0x%08X\n", + RREG32(mmUVD_VCPU_CACHE_SIZE0)); + dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET1=0x%08X\n", + RREG32(mmUVD_VCPU_CACHE_OFFSET1)); + dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE1=0x%08X\n", + RREG32(mmUVD_VCPU_CACHE_SIZE1)); + dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET2=0x%08X\n", + RREG32(mmUVD_VCPU_CACHE_OFFSET2)); + dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE2=0x%08X\n", + RREG32(mmUVD_VCPU_CACHE_SIZE2)); + dev_info(adev->dev, " UVD_VCPU_CNTL=0x%08X\n", + RREG32(mmUVD_VCPU_CNTL)); + dev_info(adev->dev, " UVD_SOFT_RESET=0x%08X\n", + RREG32(mmUVD_SOFT_RESET)); + dev_info(adev->dev, " UVD_RBC_IB_BASE=0x%08X\n", + 
RREG32(mmUVD_RBC_IB_BASE)); + dev_info(adev->dev, " UVD_RBC_IB_SIZE=0x%08X\n", + RREG32(mmUVD_RBC_IB_SIZE)); + dev_info(adev->dev, " UVD_RBC_RB_BASE=0x%08X\n", + RREG32(mmUVD_RBC_RB_BASE)); + dev_info(adev->dev, " UVD_RBC_RB_RPTR=0x%08X\n", + RREG32(mmUVD_RBC_RB_RPTR)); + dev_info(adev->dev, " UVD_RBC_RB_WPTR=0x%08X\n", + RREG32(mmUVD_RBC_RB_WPTR)); + dev_info(adev->dev, " UVD_RBC_RB_WPTR_CNTL=0x%08X\n", + RREG32(mmUVD_RBC_RB_WPTR_CNTL)); + dev_info(adev->dev, " UVD_RBC_RB_CNTL=0x%08X\n", + RREG32(mmUVD_RBC_RB_CNTL)); + dev_info(adev->dev, " UVD_STATUS=0x%08X\n", + RREG32(mmUVD_STATUS)); + dev_info(adev->dev, " UVD_SEMA_TIMEOUT_STATUS=0x%08X\n", + RREG32(mmUVD_SEMA_TIMEOUT_STATUS)); + dev_info(adev->dev, " UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n", + RREG32(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL)); + dev_info(adev->dev, " UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL=0x%08X\n", + RREG32(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL)); + dev_info(adev->dev, " UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n", + RREG32(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL)); + dev_info(adev->dev, " UVD_CONTEXT_ID=0x%08X\n", + RREG32(mmUVD_CONTEXT_ID)); +} + +static int uvd_v4_2_set_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned type, + enum amdgpu_interrupt_state state) +{ + // TODO + return 0; +} + +static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + DRM_DEBUG("IH: UVD TRAP\n"); + amdgpu_fence_process(&adev->uvd.ring); + return 0; +} + +static int uvd_v4_2_set_clockgating_state(struct amdgpu_device *adev, + enum amdgpu_clockgating_state state) +{ + bool gate = false; + + if (state == AMDGPU_CG_STATE_GATE) + gate = true; + + uvd_v4_2_enable_mgcg(adev, gate); + + return 0; +} + +static int uvd_v4_2_set_powergating_state(struct amdgpu_device *adev, + enum amdgpu_powergating_state state) +{ + /* This doesn't actually powergate the UVD block. + * That's done in the dpm code via the SMC. This + * just re-inits the block as necessary. The actual + * gating still happens in the dpm code. 
We should + * revisit this when there is a cleaner line between + * the smc and the hw blocks + */ + if (state == AMDGPU_PG_STATE_GATE) { + uvd_v4_2_stop(adev); + return 0; + } else { + return uvd_v4_2_start(adev); + } +} + +const struct amdgpu_ip_funcs uvd_v4_2_ip_funcs = { + .early_init = uvd_v4_2_early_init, + .late_init = NULL, + .sw_init = uvd_v4_2_sw_init, + .sw_fini = uvd_v4_2_sw_fini, + .hw_init = uvd_v4_2_hw_init, + .hw_fini = uvd_v4_2_hw_fini, + .suspend = uvd_v4_2_suspend, + .resume = uvd_v4_2_resume, + .is_idle = uvd_v4_2_is_idle, + .wait_for_idle = uvd_v4_2_wait_for_idle, + .soft_reset = uvd_v4_2_soft_reset, + .print_status = uvd_v4_2_print_status, + .set_clockgating_state = uvd_v4_2_set_clockgating_state, + .set_powergating_state = uvd_v4_2_set_powergating_state, +}; + +static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = { + .get_rptr = uvd_v4_2_ring_get_rptr, + .get_wptr = uvd_v4_2_ring_get_wptr, + .set_wptr = uvd_v4_2_ring_set_wptr, + .parse_cs = amdgpu_uvd_ring_parse_cs, + .emit_ib = uvd_v4_2_ring_emit_ib, + .emit_fence = uvd_v4_2_ring_emit_fence, + .emit_semaphore = uvd_v4_2_ring_emit_semaphore, + .test_ring = uvd_v4_2_ring_test_ring, + .test_ib = uvd_v4_2_ring_test_ib, + .is_lockup = amdgpu_ring_test_lockup, +}; + +static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev) +{ + adev->uvd.ring.funcs = &uvd_v4_2_ring_funcs; +} + +static const struct amdgpu_irq_src_funcs uvd_v4_2_irq_funcs = { + .set = uvd_v4_2_set_interrupt_state, + .process = uvd_v4_2_process_interrupt, +}; + +static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->uvd.irq.num_types = 1; + adev->uvd.irq.funcs = &uvd_v4_2_irq_funcs; +} diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h new file mode 100644 index 000000000000..323a6d828dfe --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h @@ -0,0 +1,29 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __UVD_V4_2_H__ +#define __UVD_V4_2_H__ + +extern const struct amdgpu_ip_funcs uvd_v4_2_ip_funcs; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c new file mode 100644 index 000000000000..b47c16da6bf8 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c @@ -0,0 +1,642 @@ +/* + * Copyright 2013 Advanced Micro Devices, Inc. + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * Authors: Christian König + */ + +#include +#include +#include "amdgpu.h" +#include "amdgpu_vce.h" +#include "cikd.h" + +#include "vce/vce_2_0_d.h" +#include "vce/vce_2_0_sh_mask.h" + +#include "oss/oss_2_0_d.h" +#include "oss/oss_2_0_sh_mask.h" + +static void vce_v2_0_mc_resume(struct amdgpu_device *adev); +static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev); +static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev); + +/** + * vce_v2_0_ring_get_rptr - get read pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware read pointer + */ +static uint32_t vce_v2_0_ring_get_rptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring == &adev->vce.ring[0]) + return RREG32(mmVCE_RB_RPTR); + else + return RREG32(mmVCE_RB_RPTR2); +} + +/** + * vce_v2_0_ring_get_wptr - get write pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware write pointer + */ +static uint32_t vce_v2_0_ring_get_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring == &adev->vce.ring[0]) + return RREG32(mmVCE_RB_WPTR); + else + return RREG32(mmVCE_RB_WPTR2); +} + +/** + * vce_v2_0_ring_set_wptr - set write pointer + * + * @ring: amdgpu_ring pointer + * + * Commits the write pointer to the hardware + */ +static void vce_v2_0_ring_set_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring == &adev->vce.ring[0]) + WREG32(mmVCE_RB_WPTR, ring->wptr); + else + WREG32(mmVCE_RB_WPTR2, ring->wptr); +} + +/** + * vce_v2_0_start - start VCE block + * + * @adev: amdgpu_device pointer + * + * Setup and start the VCE block + */ +static int vce_v2_0_start(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring; + int i, j, r; + + vce_v2_0_mc_resume(adev); + + /* set BUSY flag */ + WREG32_P(mmVCE_STATUS, 1, ~1); + + ring = &adev->vce.ring[0]; + WREG32(mmVCE_RB_RPTR, ring->wptr); + WREG32(mmVCE_RB_WPTR, ring->wptr); + WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr); + WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); + WREG32(mmVCE_RB_SIZE, ring->ring_size / 4); + + ring = &adev->vce.ring[1]; + WREG32(mmVCE_RB_RPTR2, ring->wptr); + WREG32(mmVCE_RB_WPTR2, ring->wptr); + WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr); + WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); + WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4); + + 
WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK); + + WREG32_P(mmVCE_SOFT_RESET, + VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, + ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + + mdelay(100); + + WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + + for (i = 0; i < 10; ++i) { + uint32_t status; + for (j = 0; j < 100; ++j) { + status = RREG32(mmVCE_STATUS); + if (status & 2) + break; + mdelay(10); + } + r = 0; + if (status & 2) + break; + + DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n"); + WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, + ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + mdelay(10); + WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + mdelay(10); + r = -1; + } + + /* clear BUSY flag */ + WREG32_P(mmVCE_STATUS, 0, ~1); + + if (r) { + DRM_ERROR("VCE not responding, giving up!!!\n"); + return r; + } + + return 0; +} + +static int vce_v2_0_early_init(struct amdgpu_device *adev) +{ + vce_v2_0_set_ring_funcs(adev); + vce_v2_0_set_irq_funcs(adev); + + return 0; +} + +static int vce_v2_0_sw_init(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring; + int r; + + /* VCE */ + r = amdgpu_irq_add_id(adev, 167, &adev->vce.irq); + if (r) + return r; + + r = amdgpu_vce_sw_init(adev); + if (r) + return r; + + r = amdgpu_vce_resume(adev); + if (r) + return r; + + ring = &adev->vce.ring[0]; + sprintf(ring->name, "vce0"); + r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf, + &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE); + if (r) + return r; + + ring = &adev->vce.ring[1]; + sprintf(ring->name, "vce1"); + r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf, + &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE); + if (r) + return r; + + return r; +} + +static int vce_v2_0_sw_fini(struct amdgpu_device *adev) +{ + int r; + + r = amdgpu_vce_suspend(adev); + if (r) + return r; + + r = amdgpu_vce_sw_fini(adev); + if (r) + return r; + + return r; +} + +static int vce_v2_0_hw_init(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring; + int r; + + r = vce_v2_0_start(adev); + if (r) + return r; + + ring = &adev->vce.ring[0]; + ring->ready = true; + r = amdgpu_ring_test_ring(ring); + if (r) { + ring->ready = false; + return r; + } + + ring = &adev->vce.ring[1]; + ring->ready = true; + r = amdgpu_ring_test_ring(ring); + if (r) { + ring->ready = false; + return r; + } + + DRM_INFO("VCE initialized successfully.\n"); + + return 0; +} + +static int vce_v2_0_hw_fini(struct amdgpu_device *adev) +{ + // TODO + return 0; +} + +static int vce_v2_0_suspend(struct amdgpu_device *adev) +{ + int r; + + r = vce_v2_0_hw_fini(adev); + if (r) + return r; + + r = amdgpu_vce_suspend(adev); + if (r) + return r; + + return r; +} + +static int vce_v2_0_resume(struct amdgpu_device *adev) +{ + int r; + + r = amdgpu_vce_resume(adev); + if (r) + return r; + + r = vce_v2_0_hw_init(adev); + if (r) + return r; + + return r; +} + +static void vce_v2_0_set_sw_cg(struct amdgpu_device *adev, bool gated) +{ + u32 tmp; + + if (gated) { + tmp = RREG32(mmVCE_CLOCK_GATING_B); + tmp |= 0xe70000; + WREG32(mmVCE_CLOCK_GATING_B, tmp); + + tmp = RREG32(mmVCE_UENC_CLOCK_GATING); + tmp |= 0xff000000; + WREG32(mmVCE_UENC_CLOCK_GATING, tmp); + + tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING); + tmp &= ~0x3fc; + WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp); + + WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0); + } else { + tmp = RREG32(mmVCE_CLOCK_GATING_B); + tmp |= 0xe7; + tmp &= ~0xe70000; + WREG32(mmVCE_CLOCK_GATING_B, tmp); + + tmp = RREG32(mmVCE_UENC_CLOCK_GATING); + tmp |= 
0x1fe000; + tmp &= ~0xff000000; + WREG32(mmVCE_UENC_CLOCK_GATING, tmp); + + tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING); + tmp |= 0x3fc; + WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp); + } +} + +static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated) +{ + u32 orig, tmp; + + tmp = RREG32(mmVCE_CLOCK_GATING_B); + tmp &= ~0x00060006; + if (gated) { + tmp |= 0xe10000; + } else { + tmp |= 0xe1; + tmp &= ~0xe10000; + } + WREG32(mmVCE_CLOCK_GATING_B, tmp); + + orig = tmp = RREG32(mmVCE_UENC_CLOCK_GATING); + tmp &= ~0x1fe000; + tmp &= ~0xff000000; + if (tmp != orig) + WREG32(mmVCE_UENC_CLOCK_GATING, tmp); + + orig = tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING); + tmp &= ~0x3fc; + if (tmp != orig) + WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp); + + if (gated) + WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0); +} + +static void vce_v2_0_disable_cg(struct amdgpu_device *adev) +{ + WREG32(mmVCE_CGTT_CLK_OVERRIDE, 7); +} + +static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable) +{ + bool sw_cg = false; + + if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_VCE_MGCG)) { + if (sw_cg) + vce_v2_0_set_sw_cg(adev, true); + else + vce_v2_0_set_dyn_cg(adev, true); + } else { + vce_v2_0_disable_cg(adev); + + if (sw_cg) + vce_v2_0_set_sw_cg(adev, false); + else + vce_v2_0_set_dyn_cg(adev, false); + } +} + +static void vce_v2_0_init_cg(struct amdgpu_device *adev) +{ + u32 tmp; + + tmp = RREG32(mmVCE_CLOCK_GATING_A); + tmp &= ~0xfff; + tmp |= ((0 << 0) | (4 << 4)); + tmp |= 0x40000; + WREG32(mmVCE_CLOCK_GATING_A, tmp); + + tmp = RREG32(mmVCE_UENC_CLOCK_GATING); + tmp &= ~0xfff; + tmp |= ((0 << 0) | (4 << 4)); + WREG32(mmVCE_UENC_CLOCK_GATING, tmp); + + tmp = RREG32(mmVCE_CLOCK_GATING_B); + tmp |= 0x10; + tmp &= ~0x100000; + WREG32(mmVCE_CLOCK_GATING_B, tmp); +} + +static void vce_v2_0_mc_resume(struct amdgpu_device *adev) +{ + uint64_t addr = adev->vce.gpu_addr; + uint32_t size; + + WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16)); + WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000); + WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F); + WREG32(mmVCE_CLOCK_GATING_B, 0xf7); + + WREG32(mmVCE_LMI_CTRL, 0x00398000); + WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1); + WREG32(mmVCE_LMI_SWAP_CNTL, 0); + WREG32(mmVCE_LMI_SWAP_CNTL1, 0); + WREG32(mmVCE_LMI_VM_CTRL, 0); + + addr += AMDGPU_VCE_FIRMWARE_OFFSET; + size = AMDGPU_GPU_PAGE_ALIGN(adev->vce.fw->size); + WREG32(mmVCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff); + WREG32(mmVCE_VCPU_CACHE_SIZE0, size); + + addr += size; + size = AMDGPU_VCE_STACK_SIZE; + WREG32(mmVCE_VCPU_CACHE_OFFSET1, addr & 0x7fffffff); + WREG32(mmVCE_VCPU_CACHE_SIZE1, size); + + addr += size; + size = AMDGPU_VCE_HEAP_SIZE; + WREG32(mmVCE_VCPU_CACHE_OFFSET2, addr & 0x7fffffff); + WREG32(mmVCE_VCPU_CACHE_SIZE2, size); + + WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100); + + WREG32_P(mmVCE_SYS_INT_EN, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK, + ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK); + + vce_v2_0_init_cg(adev); +} + +static bool vce_v2_0_is_idle(struct amdgpu_device *adev) +{ + return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK); +} + +static int vce_v2_0_wait_for_idle(struct amdgpu_device *adev) +{ + unsigned i; + + for (i = 0; i < adev->usec_timeout; i++) { + if (!(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK)) + return 0; + } + return -ETIMEDOUT; +} + +static int vce_v2_0_soft_reset(struct amdgpu_device *adev) +{ + WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK, + ~SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK); + mdelay(5); + + return 
vce_v2_0_start(adev); +} + +static void vce_v2_0_print_status(struct amdgpu_device *adev) +{ + dev_info(adev->dev, "VCE 2.0 registers\n"); + dev_info(adev->dev, " VCE_STATUS=0x%08X\n", + RREG32(mmVCE_STATUS)); + dev_info(adev->dev, " VCE_VCPU_CNTL=0x%08X\n", + RREG32(mmVCE_VCPU_CNTL)); + dev_info(adev->dev, " VCE_VCPU_CACHE_OFFSET0=0x%08X\n", + RREG32(mmVCE_VCPU_CACHE_OFFSET0)); + dev_info(adev->dev, " VCE_VCPU_CACHE_SIZE0=0x%08X\n", + RREG32(mmVCE_VCPU_CACHE_SIZE0)); + dev_info(adev->dev, " VCE_VCPU_CACHE_OFFSET1=0x%08X\n", + RREG32(mmVCE_VCPU_CACHE_OFFSET1)); + dev_info(adev->dev, " VCE_VCPU_CACHE_SIZE1=0x%08X\n", + RREG32(mmVCE_VCPU_CACHE_SIZE1)); + dev_info(adev->dev, " VCE_VCPU_CACHE_OFFSET2=0x%08X\n", + RREG32(mmVCE_VCPU_CACHE_OFFSET2)); + dev_info(adev->dev, " VCE_VCPU_CACHE_SIZE2=0x%08X\n", + RREG32(mmVCE_VCPU_CACHE_SIZE2)); + dev_info(adev->dev, " VCE_SOFT_RESET=0x%08X\n", + RREG32(mmVCE_SOFT_RESET)); + dev_info(adev->dev, " VCE_RB_BASE_LO2=0x%08X\n", + RREG32(mmVCE_RB_BASE_LO2)); + dev_info(adev->dev, " VCE_RB_BASE_HI2=0x%08X\n", + RREG32(mmVCE_RB_BASE_HI2)); + dev_info(adev->dev, " VCE_RB_SIZE2=0x%08X\n", + RREG32(mmVCE_RB_SIZE2)); + dev_info(adev->dev, " VCE_RB_RPTR2=0x%08X\n", + RREG32(mmVCE_RB_RPTR2)); + dev_info(adev->dev, " VCE_RB_WPTR2=0x%08X\n", + RREG32(mmVCE_RB_WPTR2)); + dev_info(adev->dev, " VCE_RB_BASE_LO=0x%08X\n", + RREG32(mmVCE_RB_BASE_LO)); + dev_info(adev->dev, " VCE_RB_BASE_HI=0x%08X\n", + RREG32(mmVCE_RB_BASE_HI)); + dev_info(adev->dev, " VCE_RB_SIZE=0x%08X\n", + RREG32(mmVCE_RB_SIZE)); + dev_info(adev->dev, " VCE_RB_RPTR=0x%08X\n", + RREG32(mmVCE_RB_RPTR)); + dev_info(adev->dev, " VCE_RB_WPTR=0x%08X\n", + RREG32(mmVCE_RB_WPTR)); + dev_info(adev->dev, " VCE_CLOCK_GATING_A=0x%08X\n", + RREG32(mmVCE_CLOCK_GATING_A)); + dev_info(adev->dev, " VCE_CLOCK_GATING_B=0x%08X\n", + RREG32(mmVCE_CLOCK_GATING_B)); + dev_info(adev->dev, " VCE_CGTT_CLK_OVERRIDE=0x%08X\n", + RREG32(mmVCE_CGTT_CLK_OVERRIDE)); + dev_info(adev->dev, " VCE_UENC_CLOCK_GATING=0x%08X\n", + RREG32(mmVCE_UENC_CLOCK_GATING)); + dev_info(adev->dev, " VCE_UENC_REG_CLOCK_GATING=0x%08X\n", + RREG32(mmVCE_UENC_REG_CLOCK_GATING)); + dev_info(adev->dev, " VCE_SYS_INT_EN=0x%08X\n", + RREG32(mmVCE_SYS_INT_EN)); + dev_info(adev->dev, " VCE_LMI_CTRL2=0x%08X\n", + RREG32(mmVCE_LMI_CTRL2)); + dev_info(adev->dev, " VCE_LMI_CTRL=0x%08X\n", + RREG32(mmVCE_LMI_CTRL)); + dev_info(adev->dev, " VCE_LMI_VM_CTRL=0x%08X\n", + RREG32(mmVCE_LMI_VM_CTRL)); + dev_info(adev->dev, " VCE_LMI_SWAP_CNTL=0x%08X\n", + RREG32(mmVCE_LMI_SWAP_CNTL)); + dev_info(adev->dev, " VCE_LMI_SWAP_CNTL1=0x%08X\n", + RREG32(mmVCE_LMI_SWAP_CNTL1)); + dev_info(adev->dev, " VCE_LMI_CACHE_CTRL=0x%08X\n", + RREG32(mmVCE_LMI_CACHE_CTRL)); +} + +static int vce_v2_0_set_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned type, + enum amdgpu_interrupt_state state) +{ + uint32_t val = 0; + + if (state == AMDGPU_IRQ_STATE_ENABLE) + val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK; + + WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK); + return 0; +} + +static int vce_v2_0_process_interrupt(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + DRM_DEBUG("IH: VCE\n"); + switch (entry->src_data) { + case 0: + amdgpu_fence_process(&adev->vce.ring[0]); + break; + case 1: + amdgpu_fence_process(&adev->vce.ring[1]); + break; + default: + DRM_ERROR("Unhandled interrupt: %d %d\n", + entry->src_id, entry->src_data); + break; + } + + return 0; +} + 
+static int vce_v2_0_set_clockgating_state(struct amdgpu_device *adev, + enum amdgpu_clockgating_state state) +{ + bool gate = false; + + if (state == AMDGPU_CG_STATE_GATE) + gate = true; + + vce_v2_0_enable_mgcg(adev, gate); + + return 0; +} + +static int vce_v2_0_set_powergating_state(struct amdgpu_device *adev, + enum amdgpu_powergating_state state) +{ + /* This doesn't actually powergate the VCE block. + * That's done in the dpm code via the SMC. This + * just re-inits the block as necessary. The actual + * gating still happens in the dpm code. We should + * revisit this when there is a cleaner line between + * the smc and the hw blocks + */ + if (state == AMDGPU_PG_STATE_GATE) + /* XXX do we need a vce_v2_0_stop()? */ + return 0; + else + return vce_v2_0_start(adev); +} + +const struct amdgpu_ip_funcs vce_v2_0_ip_funcs = { + .early_init = vce_v2_0_early_init, + .late_init = NULL, + .sw_init = vce_v2_0_sw_init, + .sw_fini = vce_v2_0_sw_fini, + .hw_init = vce_v2_0_hw_init, + .hw_fini = vce_v2_0_hw_fini, + .suspend = vce_v2_0_suspend, + .resume = vce_v2_0_resume, + .is_idle = vce_v2_0_is_idle, + .wait_for_idle = vce_v2_0_wait_for_idle, + .soft_reset = vce_v2_0_soft_reset, + .print_status = vce_v2_0_print_status, + .set_clockgating_state = vce_v2_0_set_clockgating_state, + .set_powergating_state = vce_v2_0_set_powergating_state, +}; + +static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = { + .get_rptr = vce_v2_0_ring_get_rptr, + .get_wptr = vce_v2_0_ring_get_wptr, + .set_wptr = vce_v2_0_ring_set_wptr, + .parse_cs = amdgpu_vce_ring_parse_cs, + .emit_ib = amdgpu_vce_ring_emit_ib, + .emit_fence = amdgpu_vce_ring_emit_fence, + .emit_semaphore = amdgpu_vce_ring_emit_semaphore, + .test_ring = amdgpu_vce_ring_test_ring, + .test_ib = amdgpu_vce_ring_test_ib, + .is_lockup = amdgpu_ring_test_lockup, +}; + +static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev) +{ + adev->vce.ring[0].funcs = &vce_v2_0_ring_funcs; + adev->vce.ring[1].funcs = &vce_v2_0_ring_funcs; +} + +static const struct amdgpu_irq_src_funcs vce_v2_0_irq_funcs = { + .set = vce_v2_0_set_interrupt_state, + .process = vce_v2_0_process_interrupt, +}; + +static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->vce.irq.num_types = 1; + adev->vce.irq.funcs = &vce_v2_0_irq_funcs; +}; diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.h b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.h new file mode 100644 index 000000000000..8eb1cf227ea6 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.h @@ -0,0 +1,29 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __VCE_V2_0_H__ +#define __VCE_V2_0_H__ + +extern const struct amdgpu_ip_funcs vce_v2_0_ip_funcs; + +#endif -- cgit v1.2.3 From aaa36a976bbb9b02a54c087ff390c0bad1d18e3e Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 20 Apr 2015 17:31:14 -0400 Subject: drm/amdgpu: Add initial VI support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds initial support for VI asics. This includes Iceland, Tonga, and Carrizo. Our inital focus as been Carrizo, so there are still gaps in support for Tonga and Iceland, notably power management. Acked-by: Christian König Acked-by: Jammy Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 38 +- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 15 +- drivers/gpu/drm/amd/amdgpu/clearstate_vi.h | 944 +++++ drivers/gpu/drm/amd/amdgpu/cz_dpm.c | 1712 ++++++++ drivers/gpu/drm/amd/amdgpu/cz_dpm.h | 235 ++ drivers/gpu/drm/amd/amdgpu/cz_ih.c | 435 ++ drivers/gpu/drm/amd/amdgpu/cz_ih.h | 29 + drivers/gpu/drm/amd/amdgpu/cz_ppsmc.h | 185 + drivers/gpu/drm/amd/amdgpu/cz_smc.c | 962 +++++ drivers/gpu/drm/amd/amdgpu/cz_smumgr.h | 94 + drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 3871 ++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/dce_v10_0.h | 29 + drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 3871 ++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/dce_v11_0.h | 29 + drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 4286 ++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h | 33 + drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 1271 ++++++ drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h | 36 + drivers/gpu/drm/amd/amdgpu/iceland_dpm.c | 172 + drivers/gpu/drm/amd/amdgpu/iceland_ih.c | 435 ++ drivers/gpu/drm/amd/amdgpu/iceland_ih.h | 29 + drivers/gpu/drm/amd/amdgpu/iceland_sdma_pkt_open.h | 2167 ++++++++++ drivers/gpu/drm/amd/amdgpu/iceland_smc.c | 675 +++ drivers/gpu/drm/amd/amdgpu/iceland_smumgr.h | 41 + drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 1447 +++++++ drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h | 29 + drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 1514 +++++++ drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h | 29 + drivers/gpu/drm/amd/amdgpu/smu8.h | 72 + drivers/gpu/drm/amd/amdgpu/smu8_fusion.h | 127 + drivers/gpu/drm/amd/amdgpu/smu_ucode_xfer_cz.h | 147 + drivers/gpu/drm/amd/amdgpu/smu_ucode_xfer_vi.h | 100 + drivers/gpu/drm/amd/amdgpu/tonga_dpm.c | 172 + drivers/gpu/drm/amd/amdgpu/tonga_ih.c | 458 +++ drivers/gpu/drm/amd/amdgpu/tonga_ih.h | 29 + drivers/gpu/drm/amd/amdgpu/tonga_ppsmc.h | 198 + drivers/gpu/drm/amd/amdgpu/tonga_sdma_pkt_open.h | 2240 ++++++++++ drivers/gpu/drm/amd/amdgpu/tonga_smc.c | 852 ++++ drivers/gpu/drm/amd/amdgpu/tonga_smumgr.h | 42 + drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 830 ++++ drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h | 29 + drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 810 ++++ drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h | 29 + drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 521 +++ drivers/gpu/drm/amd/amdgpu/vce_v3_0.h | 29 + drivers/gpu/drm/amd/amdgpu/vi.c | 1373 +++++++ drivers/gpu/drm/amd/amdgpu/vi.h | 33 + drivers/gpu/drm/amd/amdgpu/vi_dpm.h | 36 + drivers/gpu/drm/amd/amdgpu/vid.h | 363 ++ 49 files changed, 33097 insertions(+), 6 deletions(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/clearstate_vi.h create mode 100644 
drivers/gpu/drm/amd/amdgpu/cz_dpm.c create mode 100644 drivers/gpu/drm/amd/amdgpu/cz_dpm.h create mode 100644 drivers/gpu/drm/amd/amdgpu/cz_ih.c create mode 100644 drivers/gpu/drm/amd/amdgpu/cz_ih.h create mode 100644 drivers/gpu/drm/amd/amdgpu/cz_ppsmc.h create mode 100644 drivers/gpu/drm/amd/amdgpu/cz_smc.c create mode 100644 drivers/gpu/drm/amd/amdgpu/cz_smumgr.h create mode 100644 drivers/gpu/drm/amd/amdgpu/dce_v10_0.c create mode 100644 drivers/gpu/drm/amd/amdgpu/dce_v10_0.h create mode 100644 drivers/gpu/drm/amd/amdgpu/dce_v11_0.c create mode 100644 drivers/gpu/drm/amd/amdgpu/dce_v11_0.h create mode 100644 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c create mode 100644 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h create mode 100644 drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c create mode 100644 drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h create mode 100644 drivers/gpu/drm/amd/amdgpu/iceland_dpm.c create mode 100644 drivers/gpu/drm/amd/amdgpu/iceland_ih.c create mode 100644 drivers/gpu/drm/amd/amdgpu/iceland_ih.h create mode 100644 drivers/gpu/drm/amd/amdgpu/iceland_sdma_pkt_open.h create mode 100644 drivers/gpu/drm/amd/amdgpu/iceland_smc.c create mode 100644 drivers/gpu/drm/amd/amdgpu/iceland_smumgr.h create mode 100644 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c create mode 100644 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h create mode 100644 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c create mode 100644 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h create mode 100644 drivers/gpu/drm/amd/amdgpu/smu8.h create mode 100644 drivers/gpu/drm/amd/amdgpu/smu8_fusion.h create mode 100644 drivers/gpu/drm/amd/amdgpu/smu_ucode_xfer_cz.h create mode 100644 drivers/gpu/drm/amd/amdgpu/smu_ucode_xfer_vi.h create mode 100644 drivers/gpu/drm/amd/amdgpu/tonga_dpm.c create mode 100644 drivers/gpu/drm/amd/amdgpu/tonga_ih.c create mode 100644 drivers/gpu/drm/amd/amdgpu/tonga_ih.h create mode 100644 drivers/gpu/drm/amd/amdgpu/tonga_ppsmc.h create mode 100644 drivers/gpu/drm/amd/amdgpu/tonga_sdma_pkt_open.h create mode 100644 drivers/gpu/drm/amd/amdgpu/tonga_smc.c create mode 100644 drivers/gpu/drm/amd/amdgpu/tonga_smumgr.h create mode 100644 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c create mode 100644 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h create mode 100644 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c create mode 100644 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h create mode 100644 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c create mode 100644 drivers/gpu/drm/amd/amdgpu/vce_v3_0.h create mode 100644 drivers/gpu/drm/amd/amdgpu/vi.c create mode 100644 drivers/gpu/drm/amd/amdgpu/vi.h create mode 100644 drivers/gpu/drm/amd/amdgpu/vi_dpm.h create mode 100644 drivers/gpu/drm/amd/amdgpu/vid.h (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index aec28866945f..9a573e87cdd3 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -18,29 +18,57 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \ amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \ amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o +# add asic specific block amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o gmc_v7_0.o cik_ih.o kv_smc.o kv_dpm.o \ ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o +amdgpu-y += \ + vi.o + +# add GMC block +amdgpu-y += \ + gmc_v8_0.o + # add IH block amdgpu-y += \ amdgpu_irq.o \ - amdgpu_ih.o + amdgpu_ih.o \ + iceland_ih.o \ + tonga_ih.o \ + cz_ih.o # add SMC block amdgpu-y += \ - amdgpu_dpm.o + amdgpu_dpm.o \ + cz_smc.o cz_dpm.o \ + 
tonga_smc.o tonga_dpm.o \ + iceland_smc.o iceland_dpm.o + +# add DCE block +amdgpu-y += \ + dce_v10_0.o \ + dce_v11_0.o # add GFX block amdgpu-y += \ - amdgpu_gfx.o + amdgpu_gfx.o \ + gfx_v8_0.o + +# add async DMA block +amdgpu-y += \ + sdma_v2_4.o \ + sdma_v3_0.o # add UVD block amdgpu-y += \ - amdgpu_uvd.o + amdgpu_uvd.o \ + uvd_v5_0.o \ + uvd_v6_0.o # add VCE block amdgpu-y += \ - amdgpu_vce.o + amdgpu_vce.o \ + vce_v3_0.o amdgpu-$(CONFIG_COMPAT) += amdgpu_ioc32.o amdgpu-$(CONFIG_VGA_SWITCHEROO) += amdgpu_atpx_handler.o diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 548e0843d95a..61cf5ad78857 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -41,6 +41,7 @@ #ifdef CONFIG_DRM_AMDGPU_CIK #include "cik.h" #endif +#include "vi.h" #include "bif/bif_4_1_d.h" static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev); @@ -1154,9 +1155,21 @@ int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev, static int amdgpu_early_init(struct amdgpu_device *adev) { - int i, r = -EINVAL; + int i, r; switch (adev->asic_type) { + case CHIP_TOPAZ: + case CHIP_TONGA: + case CHIP_CARRIZO: + if (adev->asic_type == CHIP_CARRIZO) + adev->family = AMDGPU_FAMILY_CZ; + else + adev->family = AMDGPU_FAMILY_VI; + + r = vi_set_ip_blocks(adev); + if (r) + return r; + break; #ifdef CONFIG_DRM_AMDGPU_CIK case CHIP_BONAIRE: case CHIP_HAWAII: diff --git a/drivers/gpu/drm/amd/amdgpu/clearstate_vi.h b/drivers/gpu/drm/amd/amdgpu/clearstate_vi.h new file mode 100644 index 000000000000..1aab9bef9349 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/clearstate_vi.h @@ -0,0 +1,944 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +static const unsigned int vi_SECT_CONTEXT_def_1[] = +{ + 0x00000000, // DB_RENDER_CONTROL + 0x00000000, // DB_COUNT_CONTROL + 0x00000000, // DB_DEPTH_VIEW + 0x00000000, // DB_RENDER_OVERRIDE + 0x00000000, // DB_RENDER_OVERRIDE2 + 0x00000000, // DB_HTILE_DATA_BASE + 0, // HOLE + 0, // HOLE + 0x00000000, // DB_DEPTH_BOUNDS_MIN + 0x00000000, // DB_DEPTH_BOUNDS_MAX + 0x00000000, // DB_STENCIL_CLEAR + 0x00000000, // DB_DEPTH_CLEAR + 0x00000000, // PA_SC_SCREEN_SCISSOR_TL + 0x40004000, // PA_SC_SCREEN_SCISSOR_BR + 0, // HOLE + 0x00000000, // DB_DEPTH_INFO + 0x00000000, // DB_Z_INFO + 0x00000000, // DB_STENCIL_INFO + 0x00000000, // DB_Z_READ_BASE + 0x00000000, // DB_STENCIL_READ_BASE + 0x00000000, // DB_Z_WRITE_BASE + 0x00000000, // DB_STENCIL_WRITE_BASE + 0x00000000, // DB_DEPTH_SIZE + 0x00000000, // DB_DEPTH_SLICE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0x00000000, // TA_BC_BASE_ADDR + 0x00000000, // TA_BC_BASE_ADDR_HI + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0x00000000, // COHER_DEST_BASE_HI_0 + 0x00000000, // COHER_DEST_BASE_HI_1 + 0x00000000, // COHER_DEST_BASE_HI_2 + 0x00000000, // COHER_DEST_BASE_HI_3 + 0x00000000, // COHER_DEST_BASE_2 + 0x00000000, // COHER_DEST_BASE_3 + 0x00000000, // PA_SC_WINDOW_OFFSET + 0x80000000, // PA_SC_WINDOW_SCISSOR_TL + 0x40004000, // PA_SC_WINDOW_SCISSOR_BR + 0x0000ffff, // PA_SC_CLIPRECT_RULE + 0x00000000, // PA_SC_CLIPRECT_0_TL + 0x40004000, // PA_SC_CLIPRECT_0_BR + 0x00000000, // PA_SC_CLIPRECT_1_TL + 0x40004000, // PA_SC_CLIPRECT_1_BR + 0x00000000, // PA_SC_CLIPRECT_2_TL + 0x40004000, // PA_SC_CLIPRECT_2_BR + 0x00000000, // PA_SC_CLIPRECT_3_TL + 0x40004000, // PA_SC_CLIPRECT_3_BR + 0xaa99aaaa, // PA_SC_EDGERULE + 0x00000000, // PA_SU_HARDWARE_SCREEN_OFFSET + 0xffffffff, // CB_TARGET_MASK + 0xffffffff, // CB_SHADER_MASK + 0x80000000, // PA_SC_GENERIC_SCISSOR_TL + 0x40004000, // PA_SC_GENERIC_SCISSOR_BR + 0x00000000, // COHER_DEST_BASE_0 + 0x00000000, // COHER_DEST_BASE_1 + 0x80000000, // PA_SC_VPORT_SCISSOR_0_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_0_BR + 0x80000000, // PA_SC_VPORT_SCISSOR_1_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_1_BR + 0x80000000, // PA_SC_VPORT_SCISSOR_2_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_2_BR + 0x80000000, // PA_SC_VPORT_SCISSOR_3_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_3_BR + 0x80000000, // PA_SC_VPORT_SCISSOR_4_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_4_BR + 0x80000000, 
// PA_SC_VPORT_SCISSOR_5_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_5_BR + 0x80000000, // PA_SC_VPORT_SCISSOR_6_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_6_BR + 0x80000000, // PA_SC_VPORT_SCISSOR_7_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_7_BR + 0x80000000, // PA_SC_VPORT_SCISSOR_8_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_8_BR + 0x80000000, // PA_SC_VPORT_SCISSOR_9_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_9_BR + 0x80000000, // PA_SC_VPORT_SCISSOR_10_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_10_BR + 0x80000000, // PA_SC_VPORT_SCISSOR_11_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_11_BR + 0x80000000, // PA_SC_VPORT_SCISSOR_12_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_12_BR + 0x80000000, // PA_SC_VPORT_SCISSOR_13_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_13_BR + 0x80000000, // PA_SC_VPORT_SCISSOR_14_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_14_BR + 0x80000000, // PA_SC_VPORT_SCISSOR_15_TL + 0x40004000, // PA_SC_VPORT_SCISSOR_15_BR + 0x00000000, // PA_SC_VPORT_ZMIN_0 + 0x3f800000, // PA_SC_VPORT_ZMAX_0 + 0x00000000, // PA_SC_VPORT_ZMIN_1 + 0x3f800000, // PA_SC_VPORT_ZMAX_1 + 0x00000000, // PA_SC_VPORT_ZMIN_2 + 0x3f800000, // PA_SC_VPORT_ZMAX_2 + 0x00000000, // PA_SC_VPORT_ZMIN_3 + 0x3f800000, // PA_SC_VPORT_ZMAX_3 + 0x00000000, // PA_SC_VPORT_ZMIN_4 + 0x3f800000, // PA_SC_VPORT_ZMAX_4 + 0x00000000, // PA_SC_VPORT_ZMIN_5 + 0x3f800000, // PA_SC_VPORT_ZMAX_5 + 0x00000000, // PA_SC_VPORT_ZMIN_6 + 0x3f800000, // PA_SC_VPORT_ZMAX_6 + 0x00000000, // PA_SC_VPORT_ZMIN_7 + 0x3f800000, // PA_SC_VPORT_ZMAX_7 + 0x00000000, // PA_SC_VPORT_ZMIN_8 + 0x3f800000, // PA_SC_VPORT_ZMAX_8 + 0x00000000, // PA_SC_VPORT_ZMIN_9 + 0x3f800000, // PA_SC_VPORT_ZMAX_9 + 0x00000000, // PA_SC_VPORT_ZMIN_10 + 0x3f800000, // PA_SC_VPORT_ZMAX_10 + 0x00000000, // PA_SC_VPORT_ZMIN_11 + 0x3f800000, // PA_SC_VPORT_ZMAX_11 + 0x00000000, // PA_SC_VPORT_ZMIN_12 + 0x3f800000, // PA_SC_VPORT_ZMAX_12 + 0x00000000, // PA_SC_VPORT_ZMIN_13 + 0x3f800000, // PA_SC_VPORT_ZMAX_13 + 0x00000000, // PA_SC_VPORT_ZMIN_14 + 0x3f800000, // PA_SC_VPORT_ZMAX_14 + 0x00000000, // PA_SC_VPORT_ZMIN_15 + 0x3f800000, // PA_SC_VPORT_ZMAX_15 +}; +static const unsigned int vi_SECT_CONTEXT_def_2[] = +{ + 0x00000000, // PA_SC_SCREEN_EXTENT_CONTROL + 0, // HOLE + 0x00000000, // CP_PERFMON_CNTX_CNTL + 0x00000000, // CP_RINGID + 0x00000000, // CP_VMID + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0xffffffff, // VGT_MAX_VTX_INDX + 0x00000000, // VGT_MIN_VTX_INDX + 0x00000000, // VGT_INDX_OFFSET + 0x00000000, // VGT_MULTI_PRIM_IB_RESET_INDX + 0, // HOLE + 0x00000000, // CB_BLEND_RED + 0x00000000, // CB_BLEND_GREEN + 0x00000000, // CB_BLEND_BLUE + 0x00000000, // CB_BLEND_ALPHA + 0x00000000, // CB_DCC_CONTROL + 0, // HOLE + 0x00000000, // DB_STENCIL_CONTROL + 0x00000000, // DB_STENCILREFMASK + 0x00000000, // DB_STENCILREFMASK_BF + 0, // HOLE + 0x00000000, // PA_CL_VPORT_XSCALE + 0x00000000, // PA_CL_VPORT_XOFFSET + 0x00000000, // PA_CL_VPORT_YSCALE + 0x00000000, // PA_CL_VPORT_YOFFSET + 0x00000000, // PA_CL_VPORT_ZSCALE + 0x00000000, // PA_CL_VPORT_ZOFFSET + 0x00000000, // PA_CL_VPORT_XSCALE_1 + 0x00000000, // PA_CL_VPORT_XOFFSET_1 + 0x00000000, // 
PA_CL_VPORT_YSCALE_1 + 0x00000000, // PA_CL_VPORT_YOFFSET_1 + 0x00000000, // PA_CL_VPORT_ZSCALE_1 + 0x00000000, // PA_CL_VPORT_ZOFFSET_1 + 0x00000000, // PA_CL_VPORT_XSCALE_2 + 0x00000000, // PA_CL_VPORT_XOFFSET_2 + 0x00000000, // PA_CL_VPORT_YSCALE_2 + 0x00000000, // PA_CL_VPORT_YOFFSET_2 + 0x00000000, // PA_CL_VPORT_ZSCALE_2 + 0x00000000, // PA_CL_VPORT_ZOFFSET_2 + 0x00000000, // PA_CL_VPORT_XSCALE_3 + 0x00000000, // PA_CL_VPORT_XOFFSET_3 + 0x00000000, // PA_CL_VPORT_YSCALE_3 + 0x00000000, // PA_CL_VPORT_YOFFSET_3 + 0x00000000, // PA_CL_VPORT_ZSCALE_3 + 0x00000000, // PA_CL_VPORT_ZOFFSET_3 + 0x00000000, // PA_CL_VPORT_XSCALE_4 + 0x00000000, // PA_CL_VPORT_XOFFSET_4 + 0x00000000, // PA_CL_VPORT_YSCALE_4 + 0x00000000, // PA_CL_VPORT_YOFFSET_4 + 0x00000000, // PA_CL_VPORT_ZSCALE_4 + 0x00000000, // PA_CL_VPORT_ZOFFSET_4 + 0x00000000, // PA_CL_VPORT_XSCALE_5 + 0x00000000, // PA_CL_VPORT_XOFFSET_5 + 0x00000000, // PA_CL_VPORT_YSCALE_5 + 0x00000000, // PA_CL_VPORT_YOFFSET_5 + 0x00000000, // PA_CL_VPORT_ZSCALE_5 + 0x00000000, // PA_CL_VPORT_ZOFFSET_5 + 0x00000000, // PA_CL_VPORT_XSCALE_6 + 0x00000000, // PA_CL_VPORT_XOFFSET_6 + 0x00000000, // PA_CL_VPORT_YSCALE_6 + 0x00000000, // PA_CL_VPORT_YOFFSET_6 + 0x00000000, // PA_CL_VPORT_ZSCALE_6 + 0x00000000, // PA_CL_VPORT_ZOFFSET_6 + 0x00000000, // PA_CL_VPORT_XSCALE_7 + 0x00000000, // PA_CL_VPORT_XOFFSET_7 + 0x00000000, // PA_CL_VPORT_YSCALE_7 + 0x00000000, // PA_CL_VPORT_YOFFSET_7 + 0x00000000, // PA_CL_VPORT_ZSCALE_7 + 0x00000000, // PA_CL_VPORT_ZOFFSET_7 + 0x00000000, // PA_CL_VPORT_XSCALE_8 + 0x00000000, // PA_CL_VPORT_XOFFSET_8 + 0x00000000, // PA_CL_VPORT_YSCALE_8 + 0x00000000, // PA_CL_VPORT_YOFFSET_8 + 0x00000000, // PA_CL_VPORT_ZSCALE_8 + 0x00000000, // PA_CL_VPORT_ZOFFSET_8 + 0x00000000, // PA_CL_VPORT_XSCALE_9 + 0x00000000, // PA_CL_VPORT_XOFFSET_9 + 0x00000000, // PA_CL_VPORT_YSCALE_9 + 0x00000000, // PA_CL_VPORT_YOFFSET_9 + 0x00000000, // PA_CL_VPORT_ZSCALE_9 + 0x00000000, // PA_CL_VPORT_ZOFFSET_9 + 0x00000000, // PA_CL_VPORT_XSCALE_10 + 0x00000000, // PA_CL_VPORT_XOFFSET_10 + 0x00000000, // PA_CL_VPORT_YSCALE_10 + 0x00000000, // PA_CL_VPORT_YOFFSET_10 + 0x00000000, // PA_CL_VPORT_ZSCALE_10 + 0x00000000, // PA_CL_VPORT_ZOFFSET_10 + 0x00000000, // PA_CL_VPORT_XSCALE_11 + 0x00000000, // PA_CL_VPORT_XOFFSET_11 + 0x00000000, // PA_CL_VPORT_YSCALE_11 + 0x00000000, // PA_CL_VPORT_YOFFSET_11 + 0x00000000, // PA_CL_VPORT_ZSCALE_11 + 0x00000000, // PA_CL_VPORT_ZOFFSET_11 + 0x00000000, // PA_CL_VPORT_XSCALE_12 + 0x00000000, // PA_CL_VPORT_XOFFSET_12 + 0x00000000, // PA_CL_VPORT_YSCALE_12 + 0x00000000, // PA_CL_VPORT_YOFFSET_12 + 0x00000000, // PA_CL_VPORT_ZSCALE_12 + 0x00000000, // PA_CL_VPORT_ZOFFSET_12 + 0x00000000, // PA_CL_VPORT_XSCALE_13 + 0x00000000, // PA_CL_VPORT_XOFFSET_13 + 0x00000000, // PA_CL_VPORT_YSCALE_13 + 0x00000000, // PA_CL_VPORT_YOFFSET_13 + 0x00000000, // PA_CL_VPORT_ZSCALE_13 + 0x00000000, // PA_CL_VPORT_ZOFFSET_13 + 0x00000000, // PA_CL_VPORT_XSCALE_14 + 0x00000000, // PA_CL_VPORT_XOFFSET_14 + 0x00000000, // PA_CL_VPORT_YSCALE_14 + 0x00000000, // PA_CL_VPORT_YOFFSET_14 + 0x00000000, // PA_CL_VPORT_ZSCALE_14 + 0x00000000, // PA_CL_VPORT_ZOFFSET_14 + 0x00000000, // PA_CL_VPORT_XSCALE_15 + 0x00000000, // PA_CL_VPORT_XOFFSET_15 + 0x00000000, // PA_CL_VPORT_YSCALE_15 + 0x00000000, // PA_CL_VPORT_YOFFSET_15 + 0x00000000, // PA_CL_VPORT_ZSCALE_15 + 0x00000000, // PA_CL_VPORT_ZOFFSET_15 + 0x00000000, // PA_CL_UCP_0_X + 0x00000000, // PA_CL_UCP_0_Y + 0x00000000, // PA_CL_UCP_0_Z + 0x00000000, // PA_CL_UCP_0_W + 0x00000000, // 
PA_CL_UCP_1_X + 0x00000000, // PA_CL_UCP_1_Y + 0x00000000, // PA_CL_UCP_1_Z + 0x00000000, // PA_CL_UCP_1_W + 0x00000000, // PA_CL_UCP_2_X + 0x00000000, // PA_CL_UCP_2_Y + 0x00000000, // PA_CL_UCP_2_Z + 0x00000000, // PA_CL_UCP_2_W + 0x00000000, // PA_CL_UCP_3_X + 0x00000000, // PA_CL_UCP_3_Y + 0x00000000, // PA_CL_UCP_3_Z + 0x00000000, // PA_CL_UCP_3_W + 0x00000000, // PA_CL_UCP_4_X + 0x00000000, // PA_CL_UCP_4_Y + 0x00000000, // PA_CL_UCP_4_Z + 0x00000000, // PA_CL_UCP_4_W + 0x00000000, // PA_CL_UCP_5_X + 0x00000000, // PA_CL_UCP_5_Y + 0x00000000, // PA_CL_UCP_5_Z + 0x00000000, // PA_CL_UCP_5_W + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0x00000000, // SPI_PS_INPUT_CNTL_0 + 0x00000000, // SPI_PS_INPUT_CNTL_1 + 0x00000000, // SPI_PS_INPUT_CNTL_2 + 0x00000000, // SPI_PS_INPUT_CNTL_3 + 0x00000000, // SPI_PS_INPUT_CNTL_4 + 0x00000000, // SPI_PS_INPUT_CNTL_5 + 0x00000000, // SPI_PS_INPUT_CNTL_6 + 0x00000000, // SPI_PS_INPUT_CNTL_7 + 0x00000000, // SPI_PS_INPUT_CNTL_8 + 0x00000000, // SPI_PS_INPUT_CNTL_9 + 0x00000000, // SPI_PS_INPUT_CNTL_10 + 0x00000000, // SPI_PS_INPUT_CNTL_11 + 0x00000000, // SPI_PS_INPUT_CNTL_12 + 0x00000000, // SPI_PS_INPUT_CNTL_13 + 0x00000000, // SPI_PS_INPUT_CNTL_14 + 0x00000000, // SPI_PS_INPUT_CNTL_15 + 0x00000000, // SPI_PS_INPUT_CNTL_16 + 0x00000000, // SPI_PS_INPUT_CNTL_17 + 0x00000000, // SPI_PS_INPUT_CNTL_18 + 0x00000000, // SPI_PS_INPUT_CNTL_19 + 0x00000000, // SPI_PS_INPUT_CNTL_20 + 0x00000000, // SPI_PS_INPUT_CNTL_21 + 0x00000000, // SPI_PS_INPUT_CNTL_22 + 0x00000000, // SPI_PS_INPUT_CNTL_23 + 0x00000000, // SPI_PS_INPUT_CNTL_24 + 0x00000000, // SPI_PS_INPUT_CNTL_25 + 0x00000000, // SPI_PS_INPUT_CNTL_26 + 0x00000000, // SPI_PS_INPUT_CNTL_27 + 0x00000000, // SPI_PS_INPUT_CNTL_28 + 0x00000000, // SPI_PS_INPUT_CNTL_29 + 0x00000000, // SPI_PS_INPUT_CNTL_30 + 0x00000000, // SPI_PS_INPUT_CNTL_31 + 0x00000000, // SPI_VS_OUT_CONFIG + 0, // HOLE + 0x00000000, // SPI_PS_INPUT_ENA + 0x00000000, // SPI_PS_INPUT_ADDR + 0x00000000, // SPI_INTERP_CONTROL_0 + 0x00000002, // SPI_PS_IN_CONTROL + 0, // HOLE + 0x00000000, // SPI_BARYC_CNTL + 0, // HOLE + 0x00000000, // SPI_TMPRING_SIZE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0x00000000, // SPI_SHADER_POS_FORMAT + 0x00000000, // SPI_SHADER_Z_FORMAT + 0x00000000, // SPI_SHADER_COL_FORMAT + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0x00000000, // CB_BLEND0_CONTROL + 0x00000000, // CB_BLEND1_CONTROL + 0x00000000, // CB_BLEND2_CONTROL + 0x00000000, // CB_BLEND3_CONTROL + 0x00000000, // CB_BLEND4_CONTROL + 0x00000000, // CB_BLEND5_CONTROL + 0x00000000, // CB_BLEND6_CONTROL + 0x00000000, // CB_BLEND7_CONTROL +}; +static const unsigned int vi_SECT_CONTEXT_def_3[] = +{ + 0x00000000, // PA_CL_POINT_X_RAD + 0x00000000, // PA_CL_POINT_Y_RAD + 0x00000000, // PA_CL_POINT_SIZE + 0x00000000, // PA_CL_POINT_CULL_RAD + 0x00000000, // VGT_DMA_BASE_HI + 0x00000000, // VGT_DMA_BASE +}; +static const unsigned int vi_SECT_CONTEXT_def_4[] = +{ + 0x00000000, // DB_DEPTH_CONTROL + 0x00000000, // DB_EQAA + 0x00000000, // CB_COLOR_CONTROL + 0x00000000, // DB_SHADER_CONTROL + 0x00090000, // PA_CL_CLIP_CNTL + 
0x00000004, // PA_SU_SC_MODE_CNTL + 0x00000000, // PA_CL_VTE_CNTL + 0x00000000, // PA_CL_VS_OUT_CNTL + 0x00000000, // PA_CL_NANINF_CNTL + 0x00000000, // PA_SU_LINE_STIPPLE_CNTL + 0x00000000, // PA_SU_LINE_STIPPLE_SCALE + 0x00000000, // PA_SU_PRIM_FILTER_CNTL + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0x00000000, // PA_SU_POINT_SIZE + 0x00000000, // PA_SU_POINT_MINMAX + 0x00000000, // PA_SU_LINE_CNTL + 0x00000000, // PA_SC_LINE_STIPPLE + 0x00000000, // VGT_OUTPUT_PATH_CNTL + 0x00000000, // VGT_HOS_CNTL + 0x00000000, // VGT_HOS_MAX_TESS_LEVEL + 0x00000000, // VGT_HOS_MIN_TESS_LEVEL + 0x00000000, // VGT_HOS_REUSE_DEPTH + 0x00000000, // VGT_GROUP_PRIM_TYPE + 0x00000000, // VGT_GROUP_FIRST_DECR + 0x00000000, // VGT_GROUP_DECR + 0x00000000, // VGT_GROUP_VECT_0_CNTL + 0x00000000, // VGT_GROUP_VECT_1_CNTL + 0x00000000, // VGT_GROUP_VECT_0_FMT_CNTL + 0x00000000, // VGT_GROUP_VECT_1_FMT_CNTL + 0x00000000, // VGT_GS_MODE + 0x00000000, // VGT_GS_ONCHIP_CNTL + 0x00000000, // PA_SC_MODE_CNTL_0 + 0x00000000, // PA_SC_MODE_CNTL_1 + 0x00000000, // VGT_ENHANCE + 0x00000100, // VGT_GS_PER_ES + 0x00000080, // VGT_ES_PER_GS + 0x00000002, // VGT_GS_PER_VS + 0x00000000, // VGT_GSVS_RING_OFFSET_1 + 0x00000000, // VGT_GSVS_RING_OFFSET_2 + 0x00000000, // VGT_GSVS_RING_OFFSET_3 + 0x00000000, // VGT_GS_OUT_PRIM_TYPE + 0x00000000, // IA_ENHANCE +}; +static const unsigned int vi_SECT_CONTEXT_def_5[] = +{ + 0x00000000, // WD_ENHANCE + 0x00000000, // VGT_PRIMITIVEID_EN +}; +static const unsigned int vi_SECT_CONTEXT_def_6[] = +{ + 0x00000000, // VGT_PRIMITIVEID_RESET +}; +static const unsigned int vi_SECT_CONTEXT_def_7[] = +{ + 0x00000000, // VGT_MULTI_PRIM_IB_RESET_EN + 0, // HOLE + 0, // HOLE + 0x00000000, // VGT_INSTANCE_STEP_RATE_0 + 0x00000000, // VGT_INSTANCE_STEP_RATE_1 + 0x000000ff, // IA_MULTI_VGT_PARAM + 0x00000000, // VGT_ESGS_RING_ITEMSIZE + 0x00000000, // VGT_GSVS_RING_ITEMSIZE + 0x00000000, // VGT_REUSE_OFF + 0x00000000, // VGT_VTX_CNT_EN + 0x00000000, // DB_HTILE_SURFACE + 0x00000000, // DB_SRESULTS_COMPARE_STATE0 + 0x00000000, // DB_SRESULTS_COMPARE_STATE1 + 
0x00000000, // DB_PRELOAD_CONTROL + 0, // HOLE + 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_0 + 0x00000000, // VGT_STRMOUT_VTX_STRIDE_0 + 0, // HOLE + 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_0 + 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_1 + 0x00000000, // VGT_STRMOUT_VTX_STRIDE_1 + 0, // HOLE + 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_1 + 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_2 + 0x00000000, // VGT_STRMOUT_VTX_STRIDE_2 + 0, // HOLE + 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_2 + 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_3 + 0x00000000, // VGT_STRMOUT_VTX_STRIDE_3 + 0, // HOLE + 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_3 + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_OFFSET + 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE + 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE + 0, // HOLE + 0x00000000, // VGT_GS_MAX_VERT_OUT + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0x00000000, // VGT_TESS_DISTRIBUTION + 0x00000000, // VGT_SHADER_STAGES_EN + 0x00000000, // VGT_LS_HS_CONFIG + 0x00000000, // VGT_GS_VERT_ITEMSIZE + 0x00000000, // VGT_GS_VERT_ITEMSIZE_1 + 0x00000000, // VGT_GS_VERT_ITEMSIZE_2 + 0x00000000, // VGT_GS_VERT_ITEMSIZE_3 + 0x00000000, // VGT_TF_PARAM + 0x00000000, // DB_ALPHA_TO_MASK + 0x00000000, // VGT_DISPATCH_DRAW_INDEX + 0x00000000, // PA_SU_POLY_OFFSET_DB_FMT_CNTL + 0x00000000, // PA_SU_POLY_OFFSET_CLAMP + 0x00000000, // PA_SU_POLY_OFFSET_FRONT_SCALE + 0x00000000, // PA_SU_POLY_OFFSET_FRONT_OFFSET + 0x00000000, // PA_SU_POLY_OFFSET_BACK_SCALE + 0x00000000, // PA_SU_POLY_OFFSET_BACK_OFFSET + 0x00000000, // VGT_GS_INSTANCE_CNT + 0x00000000, // VGT_STRMOUT_CONFIG + 0x00000000, // VGT_STRMOUT_BUFFER_CONFIG + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0x00000000, // PA_SC_CENTROID_PRIORITY_0 + 0x00000000, // PA_SC_CENTROID_PRIORITY_1 + 0x00001000, // PA_SC_LINE_CNTL + 0x00000000, // PA_SC_AA_CONFIG + 0x00000005, // PA_SU_VTX_CNTL + 0x3f800000, // PA_CL_GB_VERT_CLIP_ADJ + 0x3f800000, // PA_CL_GB_VERT_DISC_ADJ + 0x3f800000, // PA_CL_GB_HORZ_CLIP_ADJ + 0x3f800000, // PA_CL_GB_HORZ_DISC_ADJ + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1 + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2 + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3 + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0 + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1 + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2 + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3 + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0 + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1 + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2 + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3 + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0 + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1 + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2 + 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3 + 0xffffffff, // PA_SC_AA_MASK_X0Y0_X1Y0 + 0xffffffff, // PA_SC_AA_MASK_X0Y1_X1Y1 + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0, // HOLE + 0x0000001e, // VGT_VERTEX_REUSE_BLOCK_CNTL + 0x00000020, // VGT_OUT_DEALLOC_CNTL + 0x00000000, // CB_COLOR0_BASE + 0x00000000, // CB_COLOR0_PITCH + 0x00000000, // CB_COLOR0_SLICE + 0x00000000, // CB_COLOR0_VIEW + 0x00000000, // CB_COLOR0_INFO + 0x00000000, // CB_COLOR0_ATTRIB + 0x00000000, // 
CB_COLOR0_DCC_CONTROL + 0x00000000, // CB_COLOR0_CMASK + 0x00000000, // CB_COLOR0_CMASK_SLICE + 0x00000000, // CB_COLOR0_FMASK + 0x00000000, // CB_COLOR0_FMASK_SLICE + 0x00000000, // CB_COLOR0_CLEAR_WORD0 + 0x00000000, // CB_COLOR0_CLEAR_WORD1 + 0x00000000, // CB_COLOR0_DCC_BASE + 0, // HOLE + 0x00000000, // CB_COLOR1_BASE + 0x00000000, // CB_COLOR1_PITCH + 0x00000000, // CB_COLOR1_SLICE + 0x00000000, // CB_COLOR1_VIEW + 0x00000000, // CB_COLOR1_INFO + 0x00000000, // CB_COLOR1_ATTRIB + 0x00000000, // CB_COLOR1_DCC_CONTROL + 0x00000000, // CB_COLOR1_CMASK + 0x00000000, // CB_COLOR1_CMASK_SLICE + 0x00000000, // CB_COLOR1_FMASK + 0x00000000, // CB_COLOR1_FMASK_SLICE + 0x00000000, // CB_COLOR1_CLEAR_WORD0 + 0x00000000, // CB_COLOR1_CLEAR_WORD1 + 0x00000000, // CB_COLOR1_DCC_BASE + 0, // HOLE + 0x00000000, // CB_COLOR2_BASE + 0x00000000, // CB_COLOR2_PITCH + 0x00000000, // CB_COLOR2_SLICE + 0x00000000, // CB_COLOR2_VIEW + 0x00000000, // CB_COLOR2_INFO + 0x00000000, // CB_COLOR2_ATTRIB + 0x00000000, // CB_COLOR2_DCC_CONTROL + 0x00000000, // CB_COLOR2_CMASK + 0x00000000, // CB_COLOR2_CMASK_SLICE + 0x00000000, // CB_COLOR2_FMASK + 0x00000000, // CB_COLOR2_FMASK_SLICE + 0x00000000, // CB_COLOR2_CLEAR_WORD0 + 0x00000000, // CB_COLOR2_CLEAR_WORD1 + 0x00000000, // CB_COLOR2_DCC_BASE + 0, // HOLE + 0x00000000, // CB_COLOR3_BASE + 0x00000000, // CB_COLOR3_PITCH + 0x00000000, // CB_COLOR3_SLICE + 0x00000000, // CB_COLOR3_VIEW + 0x00000000, // CB_COLOR3_INFO + 0x00000000, // CB_COLOR3_ATTRIB + 0x00000000, // CB_COLOR3_DCC_CONTROL + 0x00000000, // CB_COLOR3_CMASK + 0x00000000, // CB_COLOR3_CMASK_SLICE + 0x00000000, // CB_COLOR3_FMASK + 0x00000000, // CB_COLOR3_FMASK_SLICE + 0x00000000, // CB_COLOR3_CLEAR_WORD0 + 0x00000000, // CB_COLOR3_CLEAR_WORD1 + 0x00000000, // CB_COLOR3_DCC_BASE + 0, // HOLE + 0x00000000, // CB_COLOR4_BASE + 0x00000000, // CB_COLOR4_PITCH + 0x00000000, // CB_COLOR4_SLICE + 0x00000000, // CB_COLOR4_VIEW + 0x00000000, // CB_COLOR4_INFO + 0x00000000, // CB_COLOR4_ATTRIB + 0x00000000, // CB_COLOR4_DCC_CONTROL + 0x00000000, // CB_COLOR4_CMASK + 0x00000000, // CB_COLOR4_CMASK_SLICE + 0x00000000, // CB_COLOR4_FMASK + 0x00000000, // CB_COLOR4_FMASK_SLICE + 0x00000000, // CB_COLOR4_CLEAR_WORD0 + 0x00000000, // CB_COLOR4_CLEAR_WORD1 + 0x00000000, // CB_COLOR4_DCC_BASE + 0, // HOLE + 0x00000000, // CB_COLOR5_BASE + 0x00000000, // CB_COLOR5_PITCH + 0x00000000, // CB_COLOR5_SLICE + 0x00000000, // CB_COLOR5_VIEW + 0x00000000, // CB_COLOR5_INFO + 0x00000000, // CB_COLOR5_ATTRIB + 0x00000000, // CB_COLOR5_DCC_CONTROL + 0x00000000, // CB_COLOR5_CMASK + 0x00000000, // CB_COLOR5_CMASK_SLICE + 0x00000000, // CB_COLOR5_FMASK + 0x00000000, // CB_COLOR5_FMASK_SLICE + 0x00000000, // CB_COLOR5_CLEAR_WORD0 + 0x00000000, // CB_COLOR5_CLEAR_WORD1 + 0x00000000, // CB_COLOR5_DCC_BASE + 0, // HOLE + 0x00000000, // CB_COLOR6_BASE + 0x00000000, // CB_COLOR6_PITCH + 0x00000000, // CB_COLOR6_SLICE + 0x00000000, // CB_COLOR6_VIEW + 0x00000000, // CB_COLOR6_INFO + 0x00000000, // CB_COLOR6_ATTRIB + 0x00000000, // CB_COLOR6_DCC_CONTROL + 0x00000000, // CB_COLOR6_CMASK + 0x00000000, // CB_COLOR6_CMASK_SLICE + 0x00000000, // CB_COLOR6_FMASK + 0x00000000, // CB_COLOR6_FMASK_SLICE + 0x00000000, // CB_COLOR6_CLEAR_WORD0 + 0x00000000, // CB_COLOR6_CLEAR_WORD1 + 0x00000000, // CB_COLOR6_DCC_BASE + 0, // HOLE + 0x00000000, // CB_COLOR7_BASE + 0x00000000, // CB_COLOR7_PITCH + 0x00000000, // CB_COLOR7_SLICE + 0x00000000, // CB_COLOR7_VIEW + 0x00000000, // CB_COLOR7_INFO + 0x00000000, // CB_COLOR7_ATTRIB + 0x00000000, // 
CB_COLOR7_DCC_CONTROL + 0x00000000, // CB_COLOR7_CMASK + 0x00000000, // CB_COLOR7_CMASK_SLICE + 0x00000000, // CB_COLOR7_FMASK + 0x00000000, // CB_COLOR7_FMASK_SLICE + 0x00000000, // CB_COLOR7_CLEAR_WORD0 + 0x00000000, // CB_COLOR7_CLEAR_WORD1 +}; +static const struct cs_extent_def vi_SECT_CONTEXT_defs[] = +{ + {vi_SECT_CONTEXT_def_1, 0x0000a000, 212 }, + {vi_SECT_CONTEXT_def_2, 0x0000a0d6, 274 }, + {vi_SECT_CONTEXT_def_3, 0x0000a1f5, 6 }, + {vi_SECT_CONTEXT_def_4, 0x0000a200, 157 }, + {vi_SECT_CONTEXT_def_5, 0x0000a2a0, 2 }, + {vi_SECT_CONTEXT_def_6, 0x0000a2a3, 1 }, + {vi_SECT_CONTEXT_def_7, 0x0000a2a5, 233 }, + { 0, 0, 0 } +}; +static const struct cs_section_def vi_cs_data[] = { + { vi_SECT_CONTEXT_defs, SECT_CONTEXT }, + { 0, SECT_NONE } +}; diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c new file mode 100644 index 000000000000..b5c8485d8a58 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c @@ -0,0 +1,1712 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
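(A note on the context register default tables above: each vi_SECT_CONTEXT_defs entry pairs one of the default-value arrays with the dword offset of the first register it covers and the number of dwords it supplies. Taking the counts from the table, vi_SECT_CONTEXT_def_1 spans 212 dwords from 0x0000a000 through 0x0000a0d3 and vi_SECT_CONTEXT_def_2 resumes at 0x0000a0d6, so 0x0000a0d4 and 0x0000a0d5 are left untouched; the {0, 0, 0} entry terminates the extent list and SECT_NONE terminates vi_cs_data.)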
+ * + */ + +#include +#include +#include "drmP.h" +#include "amdgpu.h" +#include "amdgpu_pm.h" +#include "amdgpu_atombios.h" +#include "vid.h" +#include "vi_dpm.h" +#include "amdgpu_dpm.h" +#include "cz_dpm.h" +#include "cz_ppsmc.h" +#include "atom.h" + +#include "smu/smu_8_0_d.h" +#include "smu/smu_8_0_sh_mask.h" +#include "gca/gfx_8_0_d.h" +#include "gca/gfx_8_0_sh_mask.h" +#include "gmc/gmc_8_1_d.h" +#include "bif/bif_5_1_d.h" +#include "gfx_v8_0.h" + +static struct cz_ps *cz_get_ps(struct amdgpu_ps *rps) +{ + struct cz_ps *ps = rps->ps_priv; + + return ps; +} + +static struct cz_power_info *cz_get_pi(struct amdgpu_device *adev) +{ + struct cz_power_info *pi = adev->pm.dpm.priv; + + return pi; +} + +static uint16_t cz_convert_8bit_index_to_voltage(struct amdgpu_device *adev, + uint16_t voltage) +{ + uint16_t tmp = 6200 - voltage * 25; + + return tmp; +} + +static void cz_construct_max_power_limits_table(struct amdgpu_device *adev, + struct amdgpu_clock_and_voltage_limits *table) +{ + struct cz_power_info *pi = cz_get_pi(adev); + struct amdgpu_clock_voltage_dependency_table *dep_table = + &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; + + if (dep_table->count > 0) { + table->sclk = dep_table->entries[dep_table->count - 1].clk; + table->vddc = cz_convert_8bit_index_to_voltage(adev, + dep_table->entries[dep_table->count - 1].v); + } + + table->mclk = pi->sys_info.nbp_memory_clock[0]; + +} + +union igp_info { + struct _ATOM_INTEGRATED_SYSTEM_INFO info; + struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7; + struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8; + struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_9 info_9; +}; + +static int cz_parse_sys_info_table(struct amdgpu_device *adev) +{ + struct cz_power_info *pi = cz_get_pi(adev); + struct amdgpu_mode_info *mode_info = &adev->mode_info; + int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); + union igp_info *igp_info; + u8 frev, crev; + u16 data_offset; + int i = 0; + + if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) { + igp_info = (union igp_info *)(mode_info->atom_context->bios + + data_offset); + + if (crev != 9) { + DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev); + return -EINVAL; + } + pi->sys_info.bootup_sclk = + le32_to_cpu(igp_info->info_9.ulBootUpEngineClock); + pi->sys_info.bootup_uma_clk = + le32_to_cpu(igp_info->info_9.ulBootUpUMAClock); + pi->sys_info.dentist_vco_freq = + le32_to_cpu(igp_info->info_9.ulDentistVCOFreq); + pi->sys_info.bootup_nb_voltage_index = + le16_to_cpu(igp_info->info_9.usBootUpNBVoltage); + + if (igp_info->info_9.ucHtcTmpLmt == 0) + pi->sys_info.htc_tmp_lmt = 203; + else + pi->sys_info.htc_tmp_lmt = igp_info->info_9.ucHtcTmpLmt; + + if (igp_info->info_9.ucHtcHystLmt == 0) + pi->sys_info.htc_hyst_lmt = 5; + else + pi->sys_info.htc_hyst_lmt = igp_info->info_9.ucHtcHystLmt; + + if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) { + DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n"); + return -EINVAL; + } + + if (le32_to_cpu(igp_info->info_9.ulSystemConfig) & (1 << 3) && + pi->enable_nb_ps_policy) + pi->sys_info.nb_dpm_enable = true; + else + pi->sys_info.nb_dpm_enable = false; + + for (i = 0; i < CZ_NUM_NBPSTATES; i++) { + if (i < CZ_NUM_NBPMEMORY_CLOCK) + pi->sys_info.nbp_memory_clock[i] = + le32_to_cpu(igp_info->info_9.ulNbpStateMemclkFreq[i]); + pi->sys_info.nbp_n_clock[i] = + le32_to_cpu(igp_info->info_9.ulNbpStateNClkFreq[i]); + } + + for (i = 0; i < CZ_MAX_DISPLAY_CLOCK_LEVEL; i++) + pi->sys_info.display_clock[i] = + 
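/*
 * cz_parse_sys_info_table() above only accepts revision 9 of the
 * IntegratedSystemInfo table and rejects anything else with -EINVAL.
 * Missing thermal limits fall back to defaults (htc_tmp_lmt = 203,
 * htc_hyst_lmt = 5), and NB DPM is enabled only when bit 3 of
 * ulSystemConfig is set and enable_nb_ps_policy allows it.  Voltage
 * indices from the BIOS tables are converted with
 * cz_convert_8bit_index_to_voltage(), i.e. 6200 - 25 * index, so an
 * index of 8, for example, maps to 6000.
 */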
le32_to_cpu(igp_info->info_9.sDispClkVoltageMapping[i].ulMaximumSupportedCLK); + + for (i = 0; i < CZ_NUM_NBPSTATES; i++) + pi->sys_info.nbp_voltage_index[i] = + le32_to_cpu(igp_info->info_9.usNBPStateVoltage[i]); + + if (le32_to_cpu(igp_info->info_9.ulGPUCapInfo) & + SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS) + pi->caps_enable_dfs_bypass = true; + + pi->sys_info.uma_channel_number = + igp_info->info_9.ucUMAChannelNumber; + + cz_construct_max_power_limits_table(adev, + &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac); + } + + return 0; +} + +static void cz_patch_voltage_values(struct amdgpu_device *adev) +{ + int i; + struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table = + &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; + struct amdgpu_vce_clock_voltage_dependency_table *vce_table = + &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; + struct amdgpu_clock_voltage_dependency_table *acp_table = + &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; + + if (uvd_table->count) { + for (i = 0; i < uvd_table->count; i++) + uvd_table->entries[i].v = + cz_convert_8bit_index_to_voltage(adev, + uvd_table->entries[i].v); + } + + if (vce_table->count) { + for (i = 0; i < vce_table->count; i++) + vce_table->entries[i].v = + cz_convert_8bit_index_to_voltage(adev, + vce_table->entries[i].v); + } + + if (acp_table->count) { + for (i = 0; i < acp_table->count; i++) + acp_table->entries[i].v = + cz_convert_8bit_index_to_voltage(adev, + acp_table->entries[i].v); + } + +} + +static void cz_construct_boot_state(struct amdgpu_device *adev) +{ + struct cz_power_info *pi = cz_get_pi(adev); + + pi->boot_pl.sclk = pi->sys_info.bootup_sclk; + pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index; + pi->boot_pl.ds_divider_index = 0; + pi->boot_pl.ss_divider_index = 0; + pi->boot_pl.allow_gnb_slow = 1; + pi->boot_pl.force_nbp_state = 0; + pi->boot_pl.display_wm = 0; + pi->boot_pl.vce_wm = 0; + +} + +static void cz_patch_boot_state(struct amdgpu_device *adev, + struct cz_ps *ps) +{ + struct cz_power_info *pi = cz_get_pi(adev); + + ps->num_levels = 1; + ps->levels[0] = pi->boot_pl; +} + +union pplib_clock_info { + struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen; + struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo; + struct _ATOM_PPLIB_CZ_CLOCK_INFO carrizo; +}; + +static void cz_parse_pplib_clock_info(struct amdgpu_device *adev, + struct amdgpu_ps *rps, int index, + union pplib_clock_info *clock_info) +{ + struct cz_power_info *pi = cz_get_pi(adev); + struct cz_ps *ps = cz_get_ps(rps); + struct cz_pl *pl = &ps->levels[index]; + struct amdgpu_clock_voltage_dependency_table *table = + &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; + + pl->sclk = table->entries[clock_info->carrizo.index].clk; + pl->vddc_index = table->entries[clock_info->carrizo.index].v; + + ps->num_levels = index + 1; + + if (pi->caps_sclk_ds) { + pl->ds_divider_index = 5; + pl->ss_divider_index = 5; + } + +} + +static void cz_parse_pplib_non_clock_info(struct amdgpu_device *adev, + struct amdgpu_ps *rps, + struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info, + u8 table_rev) +{ + struct cz_ps *ps = cz_get_ps(rps); + + rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings); + rps->class = le16_to_cpu(non_clock_info->usClassification); + rps->class2 = le16_to_cpu(non_clock_info->usClassification2); + + if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { + rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); + rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); + } else { + rps->vclk = 0; + rps->dclk = 0; + } + + if (rps->class & 
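/*
 * A state classified as ATOM_PPLIB_CLASSIFICATION_BOOT becomes the
 * driver's boot_ps and is patched down to the single bootup level via
 * cz_patch_boot_state(); states classified as UVD are remembered in
 * adev->pm.dpm.uvd_ps.
 */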
ATOM_PPLIB_CLASSIFICATION_BOOT) { + adev->pm.dpm.boot_ps = rps; + cz_patch_boot_state(adev, ps); + } + if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) + adev->pm.dpm.uvd_ps = rps; + +} + +union power_info { + struct _ATOM_PPLIB_POWERPLAYTABLE pplib; + struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; + struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; + struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4; + struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5; +}; + +union pplib_power_state { + struct _ATOM_PPLIB_STATE v1; + struct _ATOM_PPLIB_STATE_V2 v2; +}; + +static int cz_parse_power_table(struct amdgpu_device *adev) +{ + struct amdgpu_mode_info *mode_info = &adev->mode_info; + struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; + union pplib_power_state *power_state; + int i, j, k, non_clock_array_index, clock_array_index; + union pplib_clock_info *clock_info; + struct _StateArray *state_array; + struct _ClockInfoArray *clock_info_array; + struct _NonClockInfoArray *non_clock_info_array; + union power_info *power_info; + int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); + u16 data_offset; + u8 frev, crev; + u8 *power_state_offset; + struct cz_ps *ps; + + if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) + return -EINVAL; + power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); + + state_array = (struct _StateArray *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib.usStateArrayOffset)); + clock_info_array = (struct _ClockInfoArray *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib.usClockInfoArrayOffset)); + non_clock_info_array = (struct _NonClockInfoArray *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); + + adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) * + state_array->ucNumEntries, GFP_KERNEL); + + if (!adev->pm.dpm.ps) + return -ENOMEM; + + power_state_offset = (u8 *)state_array->states; + adev->pm.dpm.platform_caps = + le32_to_cpu(power_info->pplib.ulPlatformCaps); + adev->pm.dpm.backbias_response_time = + le16_to_cpu(power_info->pplib.usBackbiasTime); + adev->pm.dpm.voltage_response_time = + le16_to_cpu(power_info->pplib.usVoltageTime); + + for (i = 0; i < state_array->ucNumEntries; i++) { + power_state = (union pplib_power_state *)power_state_offset; + non_clock_array_index = power_state->v2.nonClockInfoIndex; + non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) + &non_clock_info_array->nonClockInfo[non_clock_array_index]; + + ps = kzalloc(sizeof(struct cz_ps), GFP_KERNEL); + if (ps == NULL) { + kfree(adev->pm.dpm.ps); + return -ENOMEM; + } + + adev->pm.dpm.ps[i].ps_priv = ps; + k = 0; + for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { + clock_array_index = power_state->v2.clockInfoIndex[j]; + if (clock_array_index >= clock_info_array->ucNumEntries) + continue; + if (k >= CZ_MAX_HARDWARE_POWERLEVELS) + break; + clock_info = (union pplib_clock_info *) + &clock_info_array->clockInfo[clock_array_index * + clock_info_array->ucEntrySize]; + cz_parse_pplib_clock_info(adev, &adev->pm.dpm.ps[i], + k, clock_info); + k++; + } + cz_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i], + non_clock_info, + non_clock_info_array->ucEntrySize); + power_state_offset += 2 + power_state->v2.ucNumDPMLevels; + } + adev->pm.dpm.num_ps = state_array->ucNumEntries; + + return 0; +} + +static int cz_process_firmware_header(struct amdgpu_device *adev) +{ + struct cz_power_info *pi = cz_get_pi(adev); + u32 
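/*
 * cz_parse_power_table() above walks the PPLib state array by hand:
 * power_state_offset advances by 2 + ucNumDPMLevels bytes per state,
 * i.e. a two byte header followed by one clockInfoIndex byte per DPM
 * level.  Levels whose clock_array_index is out of range are skipped,
 * and at most CZ_MAX_HARDWARE_POWERLEVELS levels are kept per state.
 */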
tmp; + int ret; + + ret = cz_read_smc_sram_dword(adev, SMU8_FIRMWARE_HEADER_LOCATION + + offsetof(struct SMU8_Firmware_Header, + DpmTable), + &tmp, pi->sram_end); + + if (ret == 0) + pi->dpm_table_start = tmp; + + return ret; +} + +static int cz_dpm_init(struct amdgpu_device *adev) +{ + struct cz_power_info *pi; + int ret, i; + + pi = kzalloc(sizeof(struct cz_power_info), GFP_KERNEL); + if (NULL == pi) + return -ENOMEM; + + adev->pm.dpm.priv = pi; + + ret = amdgpu_get_platform_caps(adev); + if (ret) + return ret; + + ret = amdgpu_parse_extended_power_table(adev); + if (ret) + return ret; + + pi->sram_end = SMC_RAM_END; + + /* set up DPM defaults */ + for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++) + pi->active_target[i] = CZ_AT_DFLT; + + pi->mgcg_cgtt_local0 = 0x0; + pi->mgcg_cgtt_local1 = 0x0; + pi->clock_slow_down_step = 25000; + pi->skip_clock_slow_down = 1; + pi->enable_nb_ps_policy = 1; + pi->caps_power_containment = true; + pi->caps_cac = true; + pi->didt_enabled = false; + if (pi->didt_enabled) { + pi->caps_sq_ramping = true; + pi->caps_db_ramping = true; + pi->caps_td_ramping = true; + pi->caps_tcp_ramping = true; + } + pi->caps_sclk_ds = true; + pi->voting_clients = 0x00c00033; + pi->auto_thermal_throttling_enabled = true; + pi->bapm_enabled = false; + pi->disable_nb_ps3_in_battery = false; + pi->voltage_drop_threshold = 0; + pi->caps_sclk_throttle_low_notification = false; + pi->gfx_pg_threshold = 500; + pi->caps_fps = true; + /* uvd */ + pi->caps_uvd_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_UVD) ? true : false; + pi->caps_uvd_dpm = true; + /* vce */ + pi->caps_vce_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_VCE) ? true : false; + pi->caps_vce_dpm = true; + /* acp */ + pi->caps_acp_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_ACP) ? true : false; + pi->caps_acp_dpm = true; + + pi->caps_stable_power_state = false; + pi->nb_dpm_enabled_by_driver = true; + pi->nb_dpm_enabled = false; + pi->caps_voltage_island = false; + /* flags which indicate need to upload pptable */ + pi->need_pptable_upload = true; + + ret = cz_parse_sys_info_table(adev); + if (ret) + return ret; + + cz_patch_voltage_values(adev); + cz_construct_boot_state(adev); + + ret = cz_parse_power_table(adev); + if (ret) + return ret; + + ret = cz_process_firmware_header(adev); + if (ret) + return ret; + + pi->dpm_enabled = true; + + return 0; +} + +static void cz_dpm_fini(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < adev->pm.dpm.num_ps; i++) + kfree(adev->pm.dpm.ps[i].ps_priv); + + kfree(adev->pm.dpm.ps); + kfree(adev->pm.dpm.priv); + amdgpu_free_extended_power_table(adev); +} + +static void +cz_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev, + struct seq_file *m) +{ + struct amdgpu_clock_voltage_dependency_table *table = + &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; + u32 current_index = + (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) & + TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >> + TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT; + u32 sclk, tmp; + u16 vddc; + + if (current_index >= NUM_SCLK_LEVELS) { + seq_printf(m, "invalid dpm profile %d\n", current_index); + } else { + sclk = table->entries[current_index].clk; + tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) & + SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >> + SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT; + vddc = cz_convert_8bit_index_to_voltage(adev, (u16)tmp); + seq_printf(m, "power level %d sclk: %u vddc: %u\n", + current_index, sclk, vddc); + } +} + +static void 
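/*
 * The debugfs helper above reads the SMC register
 * TARGET_AND_CURRENT_PROFILE_INDEX to get the current sclk DPM index,
 * looks the clock up in vddc_dependency_on_sclk, and converts the
 * voltage level reported in SMU_VOLTAGE_STATUS with
 * cz_convert_8bit_index_to_voltage(); a level of 2, for example,
 * prints as 6200 - 2 * 25 = 6150.
 */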
cz_dpm_print_power_state(struct amdgpu_device *adev, + struct amdgpu_ps *rps) +{ + int i; + struct cz_ps *ps = cz_get_ps(rps); + + amdgpu_dpm_print_class_info(rps->class, rps->class2); + amdgpu_dpm_print_cap_info(rps->caps); + + DRM_INFO("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); + for (i = 0; i < ps->num_levels; i++) { + struct cz_pl *pl = &ps->levels[i]; + + DRM_INFO("\t\tpower level %d sclk: %u vddc: %u\n", + i, pl->sclk, + cz_convert_8bit_index_to_voltage(adev, pl->vddc_index)); + } + + amdgpu_dpm_print_ps_status(adev, rps); +} + +static void cz_dpm_set_funcs(struct amdgpu_device *adev); + +static int cz_dpm_early_init(struct amdgpu_device *adev) +{ + cz_dpm_set_funcs(adev); + + return 0; +} + +static int cz_dpm_sw_init(struct amdgpu_device *adev) +{ + int ret = 0; + /* fix me to add thermal support TODO */ + + /* default to balanced state */ + adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED; + adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; + adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO; + adev->pm.default_sclk = adev->clock.default_sclk; + adev->pm.default_mclk = adev->clock.default_mclk; + adev->pm.current_sclk = adev->clock.default_sclk; + adev->pm.current_mclk = adev->clock.default_mclk; + adev->pm.int_thermal_type = THERMAL_TYPE_NONE; + + if (amdgpu_dpm == 0) + return 0; + + mutex_lock(&adev->pm.mutex); + ret = cz_dpm_init(adev); + if (ret) + goto dpm_init_failed; + + adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; + if (amdgpu_dpm == 1) + amdgpu_pm_print_power_states(adev); + + ret = amdgpu_pm_sysfs_init(adev); + if (ret) + goto dpm_init_failed; + + mutex_unlock(&adev->pm.mutex); + DRM_INFO("amdgpu: dpm initialized\n"); + + return 0; + +dpm_init_failed: + cz_dpm_fini(adev); + mutex_unlock(&adev->pm.mutex); + DRM_ERROR("amdgpu: dpm initialization failed\n"); + + return ret; +} + +static int cz_dpm_sw_fini(struct amdgpu_device *adev) +{ + mutex_lock(&adev->pm.mutex); + amdgpu_pm_sysfs_fini(adev); + cz_dpm_fini(adev); + mutex_unlock(&adev->pm.mutex); + + return 0; +} + +static void cz_reset_ap_mask(struct amdgpu_device *adev) +{ + struct cz_power_info *pi = cz_get_pi(adev); + + pi->active_process_mask = 0; + +} + +static int cz_dpm_download_pptable_from_smu(struct amdgpu_device *adev, + void **table) +{ + int ret = 0; + + ret = cz_smu_download_pptable(adev, table); + + return ret; +} + +static int cz_dpm_upload_pptable_to_smu(struct amdgpu_device *adev) +{ + struct cz_power_info *pi = cz_get_pi(adev); + struct SMU8_Fusion_ClkTable *clock_table; + struct atom_clock_dividers dividers; + void *table = NULL; + uint8_t i = 0; + int ret = 0; + + struct amdgpu_clock_voltage_dependency_table *vddc_table = + &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; + struct amdgpu_clock_voltage_dependency_table *vddgfx_table = + &adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk; + struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table = + &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; + struct amdgpu_vce_clock_voltage_dependency_table *vce_table = + &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; + struct amdgpu_clock_voltage_dependency_table *acp_table = + &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; + + if (!pi->need_pptable_upload) + return 0; + + ret = cz_dpm_download_pptable_from_smu(adev, &table); + if (ret) { + DRM_ERROR("amdgpu: Failed to get power play table from SMU!\n"); + return -EINVAL; + } + + clock_table = (struct SMU8_Fusion_ClkTable *)table; + /* patch clock table */ + if 
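/*
 * Power play table upload: the current table is first downloaded from
 * the SMU, then each ClkLevel entry of the Sclk/Aclk/Vclk/Dclk/Eclk
 * breakdown tables is patched with the voltage index and frequency
 * from the matching dependency table plus a DFS post divider obtained
 * via amdgpu_atombios_get_clock_dividers(), and the result is written
 * back with cz_smu_upload_pptable().  The count checks below guard
 * against dependency tables larger than CZ_MAX_HARDWARE_POWERLEVELS.
 */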
(vddc_table->count > CZ_MAX_HARDWARE_POWERLEVELS || + vddgfx_table->count > CZ_MAX_HARDWARE_POWERLEVELS || + uvd_table->count > CZ_MAX_HARDWARE_POWERLEVELS || + vce_table->count > CZ_MAX_HARDWARE_POWERLEVELS || + acp_table->count > CZ_MAX_HARDWARE_POWERLEVELS) { + DRM_ERROR("amdgpu: Invalid Clock Voltage Dependency Table!\n"); + return -EINVAL; + } + + for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++) { + + /* vddc sclk */ + clock_table->SclkBreakdownTable.ClkLevel[i].GnbVid = + (i < vddc_table->count) ? (uint8_t)vddc_table->entries[i].v : 0; + clock_table->SclkBreakdownTable.ClkLevel[i].Frequency = + (i < vddc_table->count) ? vddc_table->entries[i].clk : 0; + ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, + clock_table->SclkBreakdownTable.ClkLevel[i].Frequency, + false, ÷rs); + if (ret) + return ret; + clock_table->SclkBreakdownTable.ClkLevel[i].DfsDid = + (uint8_t)dividers.post_divider; + + /* vddgfx sclk */ + clock_table->SclkBreakdownTable.ClkLevel[i].GfxVid = + (i < vddgfx_table->count) ? (uint8_t)vddgfx_table->entries[i].v : 0; + + /* acp breakdown */ + clock_table->AclkBreakdownTable.ClkLevel[i].GfxVid = + (i < acp_table->count) ? (uint8_t)acp_table->entries[i].v : 0; + clock_table->AclkBreakdownTable.ClkLevel[i].Frequency = + (i < acp_table->count) ? acp_table->entries[i].clk : 0; + ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, + clock_table->SclkBreakdownTable.ClkLevel[i].Frequency, + false, ÷rs); + if (ret) + return ret; + clock_table->AclkBreakdownTable.ClkLevel[i].DfsDid = + (uint8_t)dividers.post_divider; + + /* uvd breakdown */ + clock_table->VclkBreakdownTable.ClkLevel[i].GfxVid = + (i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0; + clock_table->VclkBreakdownTable.ClkLevel[i].Frequency = + (i < uvd_table->count) ? uvd_table->entries[i].vclk : 0; + ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, + clock_table->VclkBreakdownTable.ClkLevel[i].Frequency, + false, ÷rs); + if (ret) + return ret; + clock_table->VclkBreakdownTable.ClkLevel[i].DfsDid = + (uint8_t)dividers.post_divider; + + clock_table->DclkBreakdownTable.ClkLevel[i].GfxVid = + (i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0; + clock_table->DclkBreakdownTable.ClkLevel[i].Frequency = + (i < uvd_table->count) ? uvd_table->entries[i].dclk : 0; + ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, + clock_table->DclkBreakdownTable.ClkLevel[i].Frequency, + false, ÷rs); + if (ret) + return ret; + clock_table->DclkBreakdownTable.ClkLevel[i].DfsDid = + (uint8_t)dividers.post_divider; + + /* vce breakdown */ + clock_table->EclkBreakdownTable.ClkLevel[i].GfxVid = + (i < vce_table->count) ? (uint8_t)vce_table->entries[i].v : 0; + clock_table->EclkBreakdownTable.ClkLevel[i].Frequency = + (i < vce_table->count) ? 
vce_table->entries[i].ecclk : 0; + ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, + clock_table->EclkBreakdownTable.ClkLevel[i].Frequency, + false, ÷rs); + if (ret) + return ret; + clock_table->EclkBreakdownTable.ClkLevel[i].DfsDid = + (uint8_t)dividers.post_divider; + } + + /* its time to upload to SMU */ + ret = cz_smu_upload_pptable(adev); + if (ret) { + DRM_ERROR("amdgpu: Failed to put power play table to SMU!\n"); + return ret; + } + + return 0; +} + +static void cz_init_sclk_limit(struct amdgpu_device *adev) +{ + struct cz_power_info *pi = cz_get_pi(adev); + struct amdgpu_clock_voltage_dependency_table *table = + &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; + uint32_t clock = 0, level; + + if (!table || !table->count) { + DRM_ERROR("Invalid Voltage Dependency table.\n"); + return; + } + + pi->sclk_dpm.soft_min_clk = 0; + pi->sclk_dpm.hard_min_clk = 0; + cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxSclkLevel); + level = cz_get_argument(adev); + if (level < table->count) + clock = table->entries[level].clk; + else { + DRM_ERROR("Invalid SLCK Voltage Dependency table entry.\n"); + clock = table->entries[table->count - 1].clk; + } + + pi->sclk_dpm.soft_max_clk = clock; + pi->sclk_dpm.hard_max_clk = clock; + +} + +static void cz_init_uvd_limit(struct amdgpu_device *adev) +{ + struct cz_power_info *pi = cz_get_pi(adev); + struct amdgpu_uvd_clock_voltage_dependency_table *table = + &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; + uint32_t clock = 0, level; + + if (!table || !table->count) { + DRM_ERROR("Invalid Voltage Dependency table.\n"); + return; + } + + pi->uvd_dpm.soft_min_clk = 0; + pi->uvd_dpm.hard_min_clk = 0; + cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxUvdLevel); + level = cz_get_argument(adev); + if (level < table->count) + clock = table->entries[level].vclk; + else { + DRM_ERROR("Invalid UVD Voltage Dependency table entry.\n"); + clock = table->entries[table->count - 1].vclk; + } + + pi->uvd_dpm.soft_max_clk = clock; + pi->uvd_dpm.hard_max_clk = clock; + +} + +static void cz_init_vce_limit(struct amdgpu_device *adev) +{ + struct cz_power_info *pi = cz_get_pi(adev); + struct amdgpu_vce_clock_voltage_dependency_table *table = + &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; + uint32_t clock = 0, level; + + if (!table || !table->count) { + DRM_ERROR("Invalid Voltage Dependency table.\n"); + return; + } + + pi->vce_dpm.soft_min_clk = 0; + pi->vce_dpm.hard_min_clk = 0; + cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxEclkLevel); + level = cz_get_argument(adev); + if (level < table->count) + clock = table->entries[level].evclk; + else { + /* future BIOS would fix this error */ + DRM_ERROR("Invalid VCE Voltage Dependency table entry.\n"); + clock = table->entries[table->count - 1].evclk; + } + + pi->vce_dpm.soft_max_clk = clock; + pi->vce_dpm.hard_max_clk = clock; + +} + +static void cz_init_acp_limit(struct amdgpu_device *adev) +{ + struct cz_power_info *pi = cz_get_pi(adev); + struct amdgpu_clock_voltage_dependency_table *table = + &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; + uint32_t clock = 0, level; + + if (!table || !table->count) { + DRM_ERROR("Invalid Voltage Dependency table.\n"); + return; + } + + pi->acp_dpm.soft_min_clk = 0; + pi->acp_dpm.hard_min_clk = 0; + cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxAclkLevel); + level = cz_get_argument(adev); + if (level < table->count) + clock = table->entries[level].clk; + else { + DRM_ERROR("Invalid ACP Voltage Dependency table entry.\n"); + clock = 
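/*
 * cz_init_sclk/uvd/vce/acp_limit() all follow the same pattern: ask
 * the SMU for the top DPM level (PPSMC_MSG_GetMax*Level), read the
 * reply with cz_get_argument(), and look the clock up in the matching
 * dependency table; an out-of-range reply falls back to the last
 * table entry, as here.
 */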
table->entries[table->count - 1].clk; + } + + pi->acp_dpm.soft_max_clk = clock; + pi->acp_dpm.hard_max_clk = clock; + +} + +static void cz_init_pg_state(struct amdgpu_device *adev) +{ + struct cz_power_info *pi = cz_get_pi(adev); + + pi->uvd_power_gated = false; + pi->vce_power_gated = false; + pi->acp_power_gated = false; + +} + +static void cz_init_sclk_threshold(struct amdgpu_device *adev) +{ + struct cz_power_info *pi = cz_get_pi(adev); + + pi->low_sclk_interrupt_threshold = 0; + +} + +static void cz_dpm_setup_asic(struct amdgpu_device *adev) +{ + cz_reset_ap_mask(adev); + cz_dpm_upload_pptable_to_smu(adev); + cz_init_sclk_limit(adev); + cz_init_uvd_limit(adev); + cz_init_vce_limit(adev); + cz_init_acp_limit(adev); + cz_init_pg_state(adev); + cz_init_sclk_threshold(adev); + +} + +static bool cz_check_smu_feature(struct amdgpu_device *adev, + uint32_t feature) +{ + uint32_t smu_feature = 0; + int ret; + + ret = cz_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_GetFeatureStatus, 0); + if (ret) { + DRM_ERROR("Failed to get SMU features from SMC.\n"); + return false; + } else { + smu_feature = cz_get_argument(adev); + if (feature & smu_feature) + return true; + } + + return false; +} + +static bool cz_check_for_dpm_enabled(struct amdgpu_device *adev) +{ + if (cz_check_smu_feature(adev, + SMU_EnabledFeatureScoreboard_SclkDpmOn)) + return true; + + return false; +} + +static void cz_program_voting_clients(struct amdgpu_device *adev) +{ + WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, PPCZ_VOTINGRIGHTSCLIENTS_DFLT0); +} + +static void cz_clear_voting_clients(struct amdgpu_device *adev) +{ + WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0); +} + +static int cz_start_dpm(struct amdgpu_device *adev) +{ + int ret = 0; + + if (amdgpu_dpm) { + ret = cz_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_EnableAllSmuFeatures, SCLK_DPM_MASK); + if (ret) { + DRM_ERROR("SMU feature: SCLK_DPM enable failed\n"); + return -EINVAL; + } + } + + return 0; +} + +static int cz_stop_dpm(struct amdgpu_device *adev) +{ + int ret = 0; + + if (amdgpu_dpm && adev->pm.dpm_enabled) { + ret = cz_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_DisableAllSmuFeatures, SCLK_DPM_MASK); + if (ret) { + DRM_ERROR("SMU feature: SCLK_DPM disable failed\n"); + return -EINVAL; + } + } + + return 0; +} + +static uint32_t cz_get_sclk_level(struct amdgpu_device *adev, + uint32_t clock, uint16_t msg) +{ + int i = 0; + struct amdgpu_clock_voltage_dependency_table *table = + &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; + + switch (msg) { + case PPSMC_MSG_SetSclkSoftMin: + case PPSMC_MSG_SetSclkHardMin: + for (i = 0; i < table->count; i++) + if (clock <= table->entries[i].clk) + break; + if (i == table->count) + i = table->count - 1; + break; + case PPSMC_MSG_SetSclkSoftMax: + case PPSMC_MSG_SetSclkHardMax: + for (i = table->count - 1; i >= 0; i--) + if (clock >= table->entries[i].clk) + break; + if (i < 0) + i = 0; + break; + default: + break; + } + + return i; +} + +static int cz_program_bootup_state(struct amdgpu_device *adev) +{ + struct cz_power_info *pi = cz_get_pi(adev); + uint32_t soft_min_clk = 0; + uint32_t soft_max_clk = 0; + int ret = 0; + + pi->sclk_dpm.soft_min_clk = pi->sys_info.bootup_sclk; + pi->sclk_dpm.soft_max_clk = pi->sys_info.bootup_sclk; + + soft_min_clk = cz_get_sclk_level(adev, + pi->sclk_dpm.soft_min_clk, + PPSMC_MSG_SetSclkSoftMin); + soft_max_clk = cz_get_sclk_level(adev, + pi->sclk_dpm.soft_max_clk, + PPSMC_MSG_SetSclkSoftMax); + + ret = cz_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_SetSclkSoftMin, soft_min_clk); + if 
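/*
 * cz_get_sclk_level() maps a clock to a DPM table index that is then
 * passed as the SMU message parameter: for the SoftMin/HardMin
 * messages it returns the first entry whose clk is >= the requested
 * clock, for SoftMax/HardMax the last entry whose clk is <= it.  With
 * an illustrative table of { 300000, 600000, 800000 }, a request of
 * 600000 yields index 1 in both directions.
 */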
(ret) + return -EINVAL; + + ret = cz_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_SetSclkSoftMax, soft_max_clk); + if (ret) + return -EINVAL; + + return 0; +} + +/* TODO */ +static int cz_disable_cgpg(struct amdgpu_device *adev) +{ + return 0; +} + +/* TODO */ +static int cz_enable_cgpg(struct amdgpu_device *adev) +{ + return 0; +} + +/* TODO */ +static int cz_program_pt_config_registers(struct amdgpu_device *adev) +{ + return 0; +} + +static void cz_do_enable_didt(struct amdgpu_device *adev, bool enable) +{ + struct cz_power_info *pi = cz_get_pi(adev); + uint32_t reg = 0; + + if (pi->caps_sq_ramping) { + reg = RREG32_DIDT(ixDIDT_SQ_CTRL0); + if (enable) + reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 1); + else + reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 0); + WREG32_DIDT(ixDIDT_SQ_CTRL0, reg); + } + if (pi->caps_db_ramping) { + reg = RREG32_DIDT(ixDIDT_DB_CTRL0); + if (enable) + reg = REG_SET_FIELD(reg, DIDT_DB_CTRL0, DIDT_CTRL_EN, 1); + else + reg = REG_SET_FIELD(reg, DIDT_DB_CTRL0, DIDT_CTRL_EN, 0); + WREG32_DIDT(ixDIDT_DB_CTRL0, reg); + } + if (pi->caps_td_ramping) { + reg = RREG32_DIDT(ixDIDT_TD_CTRL0); + if (enable) + reg = REG_SET_FIELD(reg, DIDT_TD_CTRL0, DIDT_CTRL_EN, 1); + else + reg = REG_SET_FIELD(reg, DIDT_TD_CTRL0, DIDT_CTRL_EN, 0); + WREG32_DIDT(ixDIDT_TD_CTRL0, reg); + } + if (pi->caps_tcp_ramping) { + reg = RREG32_DIDT(ixDIDT_TCP_CTRL0); + if (enable) + reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 1); + else + reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 0); + WREG32_DIDT(ixDIDT_TCP_CTRL0, reg); + } + +} + +static int cz_enable_didt(struct amdgpu_device *adev, bool enable) +{ + struct cz_power_info *pi = cz_get_pi(adev); + int ret; + + if (pi->caps_sq_ramping || pi->caps_db_ramping || + pi->caps_td_ramping || pi->caps_tcp_ramping) { + if (adev->gfx.gfx_current_status != AMDGPU_GFX_SAFE_MODE) { + ret = cz_disable_cgpg(adev); + if (ret) { + DRM_ERROR("Pre Di/Dt disable cg/pg failed\n"); + return -EINVAL; + } + adev->gfx.gfx_current_status = AMDGPU_GFX_SAFE_MODE; + } + + ret = cz_program_pt_config_registers(adev); + if (ret) { + DRM_ERROR("Di/Dt config failed\n"); + return -EINVAL; + } + cz_do_enable_didt(adev, enable); + + if (adev->gfx.gfx_current_status == AMDGPU_GFX_SAFE_MODE) { + ret = cz_enable_cgpg(adev); + if (ret) { + DRM_ERROR("Post Di/Dt enable cg/pg failed\n"); + return -EINVAL; + } + adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE; + } + } + + return 0; +} + +/* TODO */ +static void cz_reset_acp_boot_level(struct amdgpu_device *adev) +{ +} + +static void cz_update_current_ps(struct amdgpu_device *adev, + struct amdgpu_ps *rps) +{ + struct cz_power_info *pi = cz_get_pi(adev); + struct cz_ps *ps = cz_get_ps(rps); + + pi->current_ps = *ps; + pi->current_rps = *rps; + pi->current_rps.ps_priv = ps; + +} + +static void cz_update_requested_ps(struct amdgpu_device *adev, + struct amdgpu_ps *rps) +{ + struct cz_power_info *pi = cz_get_pi(adev); + struct cz_ps *ps = cz_get_ps(rps); + + pi->requested_ps = *ps; + pi->requested_rps = *rps; + pi->requested_rps.ps_priv = ps; + +} + +/* PP arbiter support needed TODO */ +static void cz_apply_state_adjust_rules(struct amdgpu_device *adev, + struct amdgpu_ps *new_rps, + struct amdgpu_ps *old_rps) +{ + struct cz_ps *ps = cz_get_ps(new_rps); + struct cz_power_info *pi = cz_get_pi(adev); + struct amdgpu_clock_and_voltage_limits *limits = + &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; + /* 10kHz memory clock */ + uint32_t mclk = 0; + + ps->force_high = false; + ps->need_dfs_bypass = 
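/*
 * Di/Dt handling above: cz_do_enable_didt() toggles DIDT_CTRL_EN in
 * the CTRL0 register of each block whose ramping cap is set (SQ, DB,
 * TD, TCP), and cz_enable_didt() brackets the update by flagging the
 * GFX block as being in safe mode; cz_disable_cgpg(),
 * cz_enable_cgpg() and cz_program_pt_config_registers() are still
 * TODO stubs.
 */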
true; + pi->video_start = new_rps->dclk || new_rps->vclk || + new_rps->evclk || new_rps->ecclk; + + if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == + ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) + pi->battery_state = true; + else + pi->battery_state = false; + + if (pi->caps_stable_power_state) + mclk = limits->mclk; + + if (mclk > pi->sys_info.nbp_memory_clock[CZ_NUM_NBPMEMORY_CLOCK - 1]) + ps->force_high = true; + +} + +static int cz_dpm_enable(struct amdgpu_device *adev) +{ + int ret = 0; + + /* renable will hang up SMU, so check first */ + if (cz_check_for_dpm_enabled(adev)) + return -EINVAL; + + cz_program_voting_clients(adev); + + ret = cz_start_dpm(adev); + if (ret) { + DRM_ERROR("Carrizo DPM enable failed\n"); + return -EINVAL; + } + + ret = cz_program_bootup_state(adev); + if (ret) { + DRM_ERROR("Carrizo bootup state program failed\n"); + return -EINVAL; + } + + ret = cz_enable_didt(adev, true); + if (ret) { + DRM_ERROR("Carrizo enable di/dt failed\n"); + return -EINVAL; + } + + cz_reset_acp_boot_level(adev); + + cz_update_current_ps(adev, adev->pm.dpm.boot_ps); + + return 0; +} + +static int cz_dpm_hw_init(struct amdgpu_device *adev) +{ + int ret; + + if (!amdgpu_dpm) + return 0; + + mutex_lock(&adev->pm.mutex); + + /* init smc in dpm hw init */ + ret = cz_smu_init(adev); + if (ret) { + DRM_ERROR("amdgpu: smc initialization failed\n"); + mutex_unlock(&adev->pm.mutex); + return ret; + } + + /* do the actual fw loading */ + ret = cz_smu_start(adev); + if (ret) { + DRM_ERROR("amdgpu: smc start failed\n"); + mutex_unlock(&adev->pm.mutex); + return ret; + } + + /* cz dpm setup asic */ + cz_dpm_setup_asic(adev); + + /* cz dpm enable */ + ret = cz_dpm_enable(adev); + if (ret) + adev->pm.dpm_enabled = false; + else + adev->pm.dpm_enabled = true; + + mutex_unlock(&adev->pm.mutex); + + return 0; +} + +static int cz_dpm_disable(struct amdgpu_device *adev) +{ + int ret = 0; + + if (!cz_check_for_dpm_enabled(adev)) + return -EINVAL; + + ret = cz_enable_didt(adev, false); + if (ret) { + DRM_ERROR("Carrizo disable di/dt failed\n"); + return -EINVAL; + } + + cz_clear_voting_clients(adev); + cz_stop_dpm(adev); + cz_update_current_ps(adev, adev->pm.dpm.boot_ps); + + return 0; +} + +static int cz_dpm_hw_fini(struct amdgpu_device *adev) +{ + int ret = 0; + + mutex_lock(&adev->pm.mutex); + + cz_smu_fini(adev); + + if (adev->pm.dpm_enabled) { + ret = cz_dpm_disable(adev); + if (ret) + return -EINVAL; + + adev->pm.dpm.current_ps = + adev->pm.dpm.requested_ps = + adev->pm.dpm.boot_ps; + } + + adev->pm.dpm_enabled = false; + + mutex_unlock(&adev->pm.mutex); + + return 0; +} + +static int cz_dpm_suspend(struct amdgpu_device *adev) +{ + int ret = 0; + + if (adev->pm.dpm_enabled) { + mutex_lock(&adev->pm.mutex); + + ret = cz_dpm_disable(adev); + if (ret) + return -EINVAL; + + adev->pm.dpm.current_ps = + adev->pm.dpm.requested_ps = + adev->pm.dpm.boot_ps; + + mutex_unlock(&adev->pm.mutex); + } + + return 0; +} + +static int cz_dpm_resume(struct amdgpu_device *adev) +{ + int ret = 0; + + mutex_lock(&adev->pm.mutex); + ret = cz_smu_init(adev); + if (ret) { + DRM_ERROR("amdgpu: smc resume failed\n"); + mutex_unlock(&adev->pm.mutex); + return ret; + } + + /* do the actual fw loading */ + ret = cz_smu_start(adev); + if (ret) { + DRM_ERROR("amdgpu: smc start failed\n"); + mutex_unlock(&adev->pm.mutex); + return ret; + } + + /* cz dpm setup asic */ + cz_dpm_setup_asic(adev); + + /* cz dpm enable */ + ret = cz_dpm_enable(adev); + if (ret) + adev->pm.dpm_enabled = false; + else + adev->pm.dpm_enabled = true; + + 
mutex_unlock(&adev->pm.mutex); + /* upon resume, re-compute the clocks */ + if (adev->pm.dpm_enabled) + amdgpu_pm_compute_clocks(adev); + + return 0; +} + +static int cz_dpm_set_clockgating_state(struct amdgpu_device *adev, + enum amdgpu_clockgating_state state) +{ + return 0; +} + +static int cz_dpm_set_powergating_state(struct amdgpu_device *adev, + enum amdgpu_powergating_state state) +{ + return 0; +} + +/* borrowed from KV, need future unify */ +static int cz_dpm_get_temperature(struct amdgpu_device *adev) +{ + int actual_temp = 0; + uint32_t temp = RREG32_SMC(0xC0300E0C); + + if (temp) + actual_temp = 1000 * ((temp / 8) - 49); + + return actual_temp; +} + +static int cz_dpm_pre_set_power_state(struct amdgpu_device *adev) +{ + struct cz_power_info *pi = cz_get_pi(adev); + struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps; + struct amdgpu_ps *new_ps = &requested_ps; + + cz_update_requested_ps(adev, new_ps); + cz_apply_state_adjust_rules(adev, &pi->requested_rps, + &pi->current_rps); + + return 0; +} + +static int cz_dpm_update_sclk_limit(struct amdgpu_device *adev) +{ + struct cz_power_info *pi = cz_get_pi(adev); + struct amdgpu_clock_and_voltage_limits *limits = + &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; + uint32_t clock, stable_ps_clock = 0; + + clock = pi->sclk_dpm.soft_min_clk; + + if (pi->caps_stable_power_state) { + stable_ps_clock = limits->sclk * 75 / 100; + if (clock < stable_ps_clock) + clock = stable_ps_clock; + } + + if (clock != pi->sclk_dpm.soft_min_clk) { + pi->sclk_dpm.soft_min_clk = clock; + cz_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_SetSclkSoftMin, + cz_get_sclk_level(adev, clock, + PPSMC_MSG_SetSclkSoftMin)); + } + + if (pi->caps_stable_power_state && + pi->sclk_dpm.soft_max_clk != clock) { + pi->sclk_dpm.soft_max_clk = clock; + cz_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_SetSclkSoftMax, + cz_get_sclk_level(adev, clock, + PPSMC_MSG_SetSclkSoftMax)); + } else { + cz_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_SetSclkSoftMax, + cz_get_sclk_level(adev, + pi->sclk_dpm.soft_max_clk, + PPSMC_MSG_SetSclkSoftMax)); + } + + return 0; +} + +static int cz_dpm_set_deep_sleep_sclk_threshold(struct amdgpu_device *adev) +{ + int ret = 0; + struct cz_power_info *pi = cz_get_pi(adev); + + if (pi->caps_sclk_ds) { + cz_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_SetMinDeepSleepSclk, + CZ_MIN_DEEP_SLEEP_SCLK); + } + + return ret; +} + +/* ?? 
without dal support, is this still needed in setpowerstate list*/ +static int cz_dpm_set_watermark_threshold(struct amdgpu_device *adev) +{ + int ret = 0; + struct cz_power_info *pi = cz_get_pi(adev); + + cz_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_SetWatermarkFrequency, + pi->sclk_dpm.soft_max_clk); + + return ret; +} + +static int cz_dpm_enable_nbdpm(struct amdgpu_device *adev) +{ + int ret = 0; + struct cz_power_info *pi = cz_get_pi(adev); + + /* also depend on dal NBPStateDisableRequired */ + if (pi->nb_dpm_enabled_by_driver && !pi->nb_dpm_enabled) { + ret = cz_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_EnableAllSmuFeatures, + NB_DPM_MASK); + if (ret) { + DRM_ERROR("amdgpu: nb dpm enable failed\n"); + return ret; + } + pi->nb_dpm_enabled = true; + } + + return ret; +} + +static void cz_dpm_nbdpm_lm_pstate_enable(struct amdgpu_device *adev, + bool enable) +{ + if (enable) + cz_send_msg_to_smc(adev, PPSMC_MSG_EnableLowMemoryPstate); + else + cz_send_msg_to_smc(adev, PPSMC_MSG_DisableLowMemoryPstate); + +} + +static int cz_dpm_update_low_memory_pstate(struct amdgpu_device *adev) +{ + int ret = 0; + struct cz_power_info *pi = cz_get_pi(adev); + struct cz_ps *ps = &pi->requested_ps; + + if (pi->sys_info.nb_dpm_enable) { + if (ps->force_high) + cz_dpm_nbdpm_lm_pstate_enable(adev, true); + else + cz_dpm_nbdpm_lm_pstate_enable(adev, false); + } + + return ret; +} + +/* with dpm enabled */ +static int cz_dpm_set_power_state(struct amdgpu_device *adev) +{ + int ret = 0; + + cz_dpm_update_sclk_limit(adev); + cz_dpm_set_deep_sleep_sclk_threshold(adev); + cz_dpm_set_watermark_threshold(adev); + cz_dpm_enable_nbdpm(adev); + cz_dpm_update_low_memory_pstate(adev); + + return ret; +} + +static void cz_dpm_post_set_power_state(struct amdgpu_device *adev) +{ + struct cz_power_info *pi = cz_get_pi(adev); + struct amdgpu_ps *ps = &pi->requested_rps; + + cz_update_current_ps(adev, ps); + +} + +static int cz_dpm_force_highest(struct amdgpu_device *adev) +{ + struct cz_power_info *pi = cz_get_pi(adev); + int ret = 0; + + if (pi->sclk_dpm.soft_min_clk != pi->sclk_dpm.soft_max_clk) { + pi->sclk_dpm.soft_min_clk = + pi->sclk_dpm.soft_max_clk; + ret = cz_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_SetSclkSoftMin, + cz_get_sclk_level(adev, + pi->sclk_dpm.soft_min_clk, + PPSMC_MSG_SetSclkSoftMin)); + if (ret) + return ret; + } + + return ret; +} + +static int cz_dpm_force_lowest(struct amdgpu_device *adev) +{ + struct cz_power_info *pi = cz_get_pi(adev); + int ret = 0; + + if (pi->sclk_dpm.soft_max_clk != pi->sclk_dpm.soft_min_clk) { + pi->sclk_dpm.soft_max_clk = pi->sclk_dpm.soft_min_clk; + ret = cz_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_SetSclkSoftMax, + cz_get_sclk_level(adev, + pi->sclk_dpm.soft_max_clk, + PPSMC_MSG_SetSclkSoftMax)); + if (ret) + return ret; + } + + return ret; +} + +static uint32_t cz_dpm_get_max_sclk_level(struct amdgpu_device *adev) +{ + struct cz_power_info *pi = cz_get_pi(adev); + + if (!pi->max_sclk_level) { + cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxSclkLevel); + pi->max_sclk_level = cz_get_argument(adev) + 1; + } + + if (pi->max_sclk_level > CZ_MAX_HARDWARE_POWERLEVELS) { + DRM_ERROR("Invalid max sclk level!\n"); + return -EINVAL; + } + + return pi->max_sclk_level; +} + +static int cz_dpm_unforce_dpm_levels(struct amdgpu_device *adev) +{ + struct cz_power_info *pi = cz_get_pi(adev); + struct amdgpu_clock_voltage_dependency_table *dep_table = + &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; + uint32_t level = 0; + int ret = 0; + + pi->sclk_dpm.soft_min_clk 
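/*
 * Forced performance levels: HIGH pins the soft minimum to the soft
 * maximum, LOW pins the soft maximum to the soft minimum, and AUTO
 * (handled here) restores the full range from the vddc/sclk
 * dependency table, using cz_dpm_get_max_sclk_level() for the upper
 * bound.
 */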
= dep_table->entries[0].clk; + level = cz_dpm_get_max_sclk_level(adev) - 1; + if (level < dep_table->count) + pi->sclk_dpm.soft_max_clk = dep_table->entries[level].clk; + else + pi->sclk_dpm.soft_max_clk = + dep_table->entries[dep_table->count - 1].clk; + + /* get min/max sclk soft value + * notify SMU to execute */ + ret = cz_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_SetSclkSoftMin, + cz_get_sclk_level(adev, + pi->sclk_dpm.soft_min_clk, + PPSMC_MSG_SetSclkSoftMin)); + if (ret) + return ret; + + ret = cz_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_SetSclkSoftMax, + cz_get_sclk_level(adev, + pi->sclk_dpm.soft_max_clk, + PPSMC_MSG_SetSclkSoftMax)); + if (ret) + return ret; + + DRM_INFO("DPM unforce state min=%d, max=%d.\n", + pi->sclk_dpm.soft_min_clk, + pi->sclk_dpm.soft_max_clk); + + return 0; +} + +static int cz_dpm_force_dpm_level(struct amdgpu_device *adev, + enum amdgpu_dpm_forced_level level) +{ + int ret = 0; + + switch (level) { + case AMDGPU_DPM_FORCED_LEVEL_HIGH: + ret = cz_dpm_force_highest(adev); + if (ret) + return ret; + break; + case AMDGPU_DPM_FORCED_LEVEL_LOW: + ret = cz_dpm_force_lowest(adev); + if (ret) + return ret; + break; + case AMDGPU_DPM_FORCED_LEVEL_AUTO: + ret = cz_dpm_unforce_dpm_levels(adev); + if (ret) + return ret; + break; + default: + break; + } + + return ret; +} + +/* fix me, display configuration change lists here + * mostly dal related*/ +static void cz_dpm_display_configuration_changed(struct amdgpu_device *adev) +{ +} + +static uint32_t cz_dpm_get_sclk(struct amdgpu_device *adev, bool low) +{ + struct cz_power_info *pi = cz_get_pi(adev); + struct cz_ps *requested_state = cz_get_ps(&pi->requested_rps); + + if (low) + return requested_state->levels[0].sclk; + else + return requested_state->levels[requested_state->num_levels - 1].sclk; + +} + +static uint32_t cz_dpm_get_mclk(struct amdgpu_device *adev, bool low) +{ + struct cz_power_info *pi = cz_get_pi(adev); + + return pi->sys_info.bootup_uma_clk; +} + +const struct amdgpu_ip_funcs cz_dpm_ip_funcs = { + .early_init = cz_dpm_early_init, + .late_init = NULL, + .sw_init = cz_dpm_sw_init, + .sw_fini = cz_dpm_sw_fini, + .hw_init = cz_dpm_hw_init, + .hw_fini = cz_dpm_hw_fini, + .suspend = cz_dpm_suspend, + .resume = cz_dpm_resume, + .is_idle = NULL, + .wait_for_idle = NULL, + .soft_reset = NULL, + .print_status = NULL, + .set_clockgating_state = cz_dpm_set_clockgating_state, + .set_powergating_state = cz_dpm_set_powergating_state, +}; + +static const struct amdgpu_dpm_funcs cz_dpm_funcs = { + .get_temperature = cz_dpm_get_temperature, + .pre_set_power_state = cz_dpm_pre_set_power_state, + .set_power_state = cz_dpm_set_power_state, + .post_set_power_state = cz_dpm_post_set_power_state, + .display_configuration_changed = cz_dpm_display_configuration_changed, + .get_sclk = cz_dpm_get_sclk, + .get_mclk = cz_dpm_get_mclk, + .print_power_state = cz_dpm_print_power_state, + .debugfs_print_current_performance_level = + cz_dpm_debugfs_print_current_performance_level, + .force_performance_level = cz_dpm_force_dpm_level, + .vblank_too_short = NULL, + .powergate_uvd = NULL, +}; + +static void cz_dpm_set_funcs(struct amdgpu_device *adev) +{ + if (NULL == adev->pm.funcs) + adev->pm.funcs = &cz_dpm_funcs; +} diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.h b/drivers/gpu/drm/amd/amdgpu/cz_dpm.h new file mode 100644 index 000000000000..ed6449de5dc5 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.h @@ -0,0 +1,235 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __CZ_DPM_H__ +#define __CZ_DPM_H__ + +#include "smu8_fusion.h" + +#define CZ_AT_DFLT 30 +#define CZ_NUM_NBPSTATES 4 +#define CZ_NUM_NBPMEMORY_CLOCK 2 +#define CZ_MAX_HARDWARE_POWERLEVELS 8 +#define CZ_MAX_DISPLAY_CLOCK_LEVEL 8 +#define CZ_MAX_DISPLAYPHY_IDS 10 + +#define PPCZ_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102 + +#define SMC_RAM_END 0x40000 + +#define DPMFlags_SCLK_Enabled 0x00000001 +#define DPMFlags_UVD_Enabled 0x00000002 +#define DPMFlags_VCE_Enabled 0x00000004 +#define DPMFlags_ACP_Enabled 0x00000008 +#define DPMFlags_ForceHighestValid 0x40000000 +#define DPMFlags_Debug 0x80000000 + +/* Do not change the following, it is also defined in SMU8.h */ +#define SMU_EnabledFeatureScoreboard_AcpDpmOn 0x00000001 +#define SMU_EnabledFeatureScoreboard_SclkDpmOn 0x00100000 +#define SMU_EnabledFeatureScoreboard_UvdDpmOn 0x00800000 +#define SMU_EnabledFeatureScoreboard_VceDpmOn 0x01000000 + +/* temporary solution to SetMinDeepSleepSclk + * should indicate by display adaptor + * 10k Hz unit*/ +#define CZ_MIN_DEEP_SLEEP_SCLK 800 + +enum cz_pt_config_reg_type { + CZ_CONFIGREG_MMR = 0, + CZ_CONFIGREG_SMC_IND, + CZ_CONFIGREG_DIDT_IND, + CZ_CONFIGREG_CACHE, + CZ_CONFIGREG_MAX +}; + +struct cz_pt_config_reg { + uint32_t offset; + uint32_t mask; + uint32_t shift; + uint32_t value; + enum cz_pt_config_reg_type type; +}; + +struct cz_dpm_entry { + uint32_t soft_min_clk; + uint32_t hard_min_clk; + uint32_t soft_max_clk; + uint32_t hard_max_clk; +}; + +struct cz_pl { + uint32_t sclk; + uint8_t vddc_index; + uint8_t ds_divider_index; + uint8_t ss_divider_index; + uint8_t allow_gnb_slow; + uint8_t force_nbp_state; + uint8_t display_wm; + uint8_t vce_wm; +}; + +struct cz_ps { + struct cz_pl levels[CZ_MAX_HARDWARE_POWERLEVELS]; + uint32_t num_levels; + bool need_dfs_bypass; + uint8_t dpm0_pg_nb_ps_lo; + uint8_t dpm0_pg_nb_ps_hi; + uint8_t dpmx_nb_ps_lo; + uint8_t dpmx_nb_ps_hi; + bool force_high; +}; + +struct cz_displayphy_entry { + uint8_t phy_present; + uint8_t active_lane_mapping; + uint8_t display_conf_type; + uint8_t num_active_lanes; +}; + +struct cz_displayphy_info { + bool phy_access_initialized; + struct cz_displayphy_entry entries[CZ_MAX_DISPLAYPHY_IDS]; +}; + +struct cz_sys_info { + uint32_t bootup_uma_clk; + uint32_t bootup_sclk; + uint32_t dentist_vco_freq; + uint32_t nb_dpm_enable; + uint32_t nbp_memory_clock[CZ_NUM_NBPMEMORY_CLOCK]; + uint32_t nbp_n_clock[CZ_NUM_NBPSTATES]; + uint8_t 
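+	/* one voltage index per NB P-state */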
nbp_voltage_index[CZ_NUM_NBPSTATES]; + uint32_t display_clock[CZ_MAX_DISPLAY_CLOCK_LEVEL]; + uint16_t bootup_nb_voltage_index; + uint8_t htc_tmp_lmt; + uint8_t htc_hyst_lmt; + uint32_t uma_channel_number; +}; + +struct cz_power_info { + uint32_t active_target[CZ_MAX_HARDWARE_POWERLEVELS]; + struct cz_sys_info sys_info; + struct cz_pl boot_pl; + bool disable_nb_ps3_in_battery; + bool battery_state; + uint32_t lowest_valid; + uint32_t highest_valid; + uint16_t high_voltage_threshold; + /* smc offsets */ + uint32_t sram_end; + uint32_t dpm_table_start; + uint32_t soft_regs_start; + /* dpm SMU tables */ + uint8_t uvd_level_count; + uint8_t vce_level_count; + uint8_t acp_level_count; + uint32_t fps_high_threshold; + uint32_t fps_low_threshold; + /* dpm table */ + uint32_t dpm_flags; + struct cz_dpm_entry sclk_dpm; + struct cz_dpm_entry uvd_dpm; + struct cz_dpm_entry vce_dpm; + struct cz_dpm_entry acp_dpm; + + uint8_t uvd_boot_level; + uint8_t uvd_interval; + uint8_t vce_boot_level; + uint8_t vce_interval; + uint8_t acp_boot_level; + uint8_t acp_interval; + + uint8_t graphics_boot_level; + uint8_t graphics_interval; + uint8_t graphics_therm_throttle_enable; + uint8_t graphics_voltage_change_enable; + uint8_t graphics_clk_slow_enable; + uint8_t graphics_clk_slow_divider; + + uint32_t low_sclk_interrupt_threshold; + bool uvd_power_gated; + bool vce_power_gated; + bool acp_power_gated; + + uint32_t active_process_mask; + + uint32_t mgcg_cgtt_local0; + uint32_t mgcg_cgtt_local1; + uint32_t clock_slow_down_step; + uint32_t skip_clock_slow_down; + bool enable_nb_ps_policy; + uint32_t voting_clients; + uint32_t voltage_drop_threshold; + uint32_t gfx_pg_threshold; + uint32_t max_sclk_level; + /* flags */ + bool didt_enabled; + bool video_start; + bool cac_enabled; + bool bapm_enabled; + bool nb_dpm_enabled_by_driver; + bool nb_dpm_enabled; + bool auto_thermal_throttling_enabled; + bool dpm_enabled; + bool need_pptable_upload; + /* caps */ + bool caps_cac; + bool caps_power_containment; + bool caps_sq_ramping; + bool caps_db_ramping; + bool caps_td_ramping; + bool caps_tcp_ramping; + bool caps_sclk_throttle_low_notification; + bool caps_fps; + bool caps_uvd_dpm; + bool caps_uvd_pg; + bool caps_vce_dpm; + bool caps_vce_pg; + bool caps_acp_dpm; + bool caps_acp_pg; + bool caps_stable_power_state; + bool caps_enable_dfs_bypass; + bool caps_sclk_ds; + bool caps_voltage_island; + /* power state */ + struct amdgpu_ps current_rps; + struct cz_ps current_ps; + struct amdgpu_ps requested_rps; + struct cz_ps requested_ps; + + bool uvd_power_down; + bool vce_power_down; + bool acp_power_down; +}; + +/* cz_smc.c */ +uint32_t cz_get_argument(struct amdgpu_device *adev); +int cz_send_msg_to_smc(struct amdgpu_device *adev, uint16_t msg); +int cz_send_msg_to_smc_with_parameter(struct amdgpu_device *adev, + uint16_t msg, uint32_t parameter); +int cz_read_smc_sram_dword(struct amdgpu_device *adev, + uint32_t smc_address, uint32_t *value, uint32_t limit); +int cz_smu_upload_pptable(struct amdgpu_device *adev); +int cz_smu_download_pptable(struct amdgpu_device *adev, void **table); +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c new file mode 100644 index 000000000000..80d508e64a86 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c @@ -0,0 +1,435 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "drmP.h" +#include "amdgpu.h" +#include "amdgpu_ih.h" +#include "vid.h" + +#include "oss/oss_3_0_1_d.h" +#include "oss/oss_3_0_1_sh_mask.h" + +#include "bif/bif_5_1_d.h" +#include "bif/bif_5_1_sh_mask.h" + +/* + * Interrupts + * Starting with r6xx, interrupts are handled via a ring buffer. + * Ring buffers are areas of GPU accessible memory that the GPU + * writes interrupt vectors into and the host reads vectors out of. + * There is a rptr (read pointer) that determines where the + * host is currently reading, and a wptr (write pointer) + * which determines where the GPU has written. When the + * pointers are equal, the ring is idle. When the GPU + * writes vectors to the ring buffer, it increments the + * wptr. When there is an interrupt, the host then starts + * fetching commands and processing them until the pointers are + * equal again at which point it updates the rptr. + */ + +static void cz_ih_set_interrupt_funcs(struct amdgpu_device *adev); + +/** + * cz_ih_enable_interrupts - Enable the interrupt ring buffer + * + * @adev: amdgpu_device pointer + * + * Enable the interrupt ring buffer (VI). + */ +static void cz_ih_enable_interrupts(struct amdgpu_device *adev) +{ + u32 ih_cntl = RREG32(mmIH_CNTL); + u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL); + + ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL, ENABLE_INTR, 1); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1); + WREG32(mmIH_CNTL, ih_cntl); + WREG32(mmIH_RB_CNTL, ih_rb_cntl); + adev->irq.ih.enabled = true; +} + +/** + * cz_ih_disable_interrupts - Disable the interrupt ring buffer + * + * @adev: amdgpu_device pointer + * + * Disable the interrupt ring buffer (VI). + */ +static void cz_ih_disable_interrupts(struct amdgpu_device *adev) +{ + u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL); + u32 ih_cntl = RREG32(mmIH_CNTL); + + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0); + ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL, ENABLE_INTR, 0); + WREG32(mmIH_RB_CNTL, ih_rb_cntl); + WREG32(mmIH_CNTL, ih_cntl); + /* set rptr, wptr to 0 */ + WREG32(mmIH_RB_RPTR, 0); + WREG32(mmIH_RB_WPTR, 0); + adev->irq.ih.enabled = false; + adev->irq.ih.rptr = 0; +} + +/** + * cz_ih_irq_init - init and enable the interrupt ring + * + * @adev: amdgpu_device pointer + * + * Allocate a ring buffer for the interrupt controller, + * enable the RLC, disable interrupts, enable the IH + * ring buffer and enable it (VI). + * Called at device load and reume. 
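+ * The ring itself is allocated in cz_ih_sw_init() (64KB) and the write
+ * pointer is mirrored to a write-back slot so cz_ih_get_wptr() can poll
+ * it from system memory instead of an MMIO read.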
+ * Returns 0 for success, errors for failure. + */ +static int cz_ih_irq_init(struct amdgpu_device *adev) +{ + int ret = 0; + int rb_bufsz; + u32 interrupt_cntl, ih_cntl, ih_rb_cntl; + u64 wptr_off; + + /* disable irqs */ + cz_ih_disable_interrupts(adev); + + /* setup interrupt control */ + WREG32(mmINTERRUPT_CNTL2, adev->dummy_page.addr >> 8); + interrupt_cntl = RREG32(mmINTERRUPT_CNTL); + /* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi + * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN + */ + interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_DUMMY_RD_OVERRIDE, 0); + /* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */ + interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_REQ_NONSNOOP_EN, 0); + WREG32(mmINTERRUPT_CNTL, interrupt_cntl); + + /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/ + WREG32(mmIH_RB_BASE, adev->irq.ih.gpu_addr >> 8); + + rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4); + ih_rb_cntl = REG_SET_FIELD(0, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 1); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz); + + /* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register value is written to memory */ + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_WRITEBACK_ENABLE, 1); + + /* set the writeback address whether it's enabled or not */ + wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4); + WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off)); + WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF); + + WREG32(mmIH_RB_CNTL, ih_rb_cntl); + + /* set rptr, wptr to 0 */ + WREG32(mmIH_RB_RPTR, 0); + WREG32(mmIH_RB_WPTR, 0); + + /* Default settings for IH_CNTL (disabled at first) */ + ih_cntl = RREG32(mmIH_CNTL); + ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL, MC_VMID, 0); + + if (adev->irq.msi_enabled) + ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL, RPTR_REARM, 1); + WREG32(mmIH_CNTL, ih_cntl); + + pci_set_master(adev->pdev); + + /* enable interrupts */ + cz_ih_enable_interrupts(adev); + + return ret; +} + +/** + * cz_ih_irq_disable - disable interrupts + * + * @adev: amdgpu_device pointer + * + * Disable interrupts on the hw (VI). + */ +static void cz_ih_irq_disable(struct amdgpu_device *adev) +{ + cz_ih_disable_interrupts(adev); + + /* Wait and acknowledge irq */ + mdelay(1); +} + +/** + * cz_ih_get_wptr - get the IH ring buffer wptr + * + * @adev: amdgpu_device pointer + * + * Get the IH ring buffer wptr from either the register + * or the writeback memory buffer (VI). Also check for + * ring buffer overflow and deal with it. + * Used by cz_irq_process(VI). + * Returns the value of the wptr. + */ +static u32 cz_ih_get_wptr(struct amdgpu_device *adev) +{ + u32 wptr, tmp; + + wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]); + + if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) { + wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0); + /* When a ring buffer overflow happen start parsing interrupt + * from the last not overwritten vector (wptr + 16). Hopefully + * this should allow us to catchup. 
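+ * The new rptr is placed 16 bytes (one vector) past the masked wptr, and
+ * WPTR_OVERFLOW_CLEAR is toggled below so the next overflow can be
+ * detected again.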
+ */ + dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", + wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask); + adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask; + tmp = RREG32(mmIH_RB_CNTL); + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); + WREG32(mmIH_RB_CNTL, tmp); + } + return (wptr & adev->irq.ih.ptr_mask); +} + +/** + * cz_ih_decode_iv - decode an interrupt vector + * + * @adev: amdgpu_device pointer + * + * Decodes the interrupt vector at the current rptr + * position and also advance the position. + */ +static void cz_ih_decode_iv(struct amdgpu_device *adev, + struct amdgpu_iv_entry *entry) +{ + /* wptr/rptr are in bytes! */ + u32 ring_index = adev->irq.ih.rptr >> 2; + uint32_t dw[4]; + + dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]); + dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]); + dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]); + dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]); + + entry->src_id = dw[0] & 0xff; + entry->src_data = dw[1] & 0xfffffff; + entry->ring_id = dw[2] & 0xff; + entry->vm_id = (dw[2] >> 8) & 0xff; + entry->pas_id = (dw[2] >> 16) & 0xffff; + + /* wptr/rptr are in bytes! */ + adev->irq.ih.rptr += 16; +} + +/** + * cz_ih_set_rptr - set the IH ring buffer rptr + * + * @adev: amdgpu_device pointer + * + * Set the IH ring buffer rptr. + */ +static void cz_ih_set_rptr(struct amdgpu_device *adev) +{ + WREG32(mmIH_RB_RPTR, adev->irq.ih.rptr); +} + +static int cz_ih_early_init(struct amdgpu_device *adev) +{ + cz_ih_set_interrupt_funcs(adev); + return 0; +} + +static int cz_ih_sw_init(struct amdgpu_device *adev) +{ + int r; + + r = amdgpu_ih_ring_init(adev, 64 * 1024, false); + if (r) + return r; + + r = amdgpu_irq_init(adev); + + return r; +} + +static int cz_ih_sw_fini(struct amdgpu_device *adev) +{ + amdgpu_irq_fini(adev); + amdgpu_ih_ring_fini(adev); + + return 0; +} + +static int cz_ih_hw_init(struct amdgpu_device *adev) +{ + int r; + + r = cz_ih_irq_init(adev); + if (r) + return r; + + return 0; +} + +static int cz_ih_hw_fini(struct amdgpu_device *adev) +{ + cz_ih_irq_disable(adev); + + return 0; +} + +static int cz_ih_suspend(struct amdgpu_device *adev) +{ + return cz_ih_hw_fini(adev); +} + +static int cz_ih_resume(struct amdgpu_device *adev) +{ + return cz_ih_hw_init(adev); +} + +static bool cz_ih_is_idle(struct amdgpu_device *adev) +{ + u32 tmp = RREG32(mmSRBM_STATUS); + + if (REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY)) + return false; + + return true; +} + +static int cz_ih_wait_for_idle(struct amdgpu_device *adev) +{ + unsigned i; + u32 tmp; + + for (i = 0; i < adev->usec_timeout; i++) { + /* read MC_STATUS */ + tmp = RREG32(mmSRBM_STATUS); + if (!REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY)) + return 0; + udelay(1); + } + return -ETIMEDOUT; +} + +static void cz_ih_print_status(struct amdgpu_device *adev) +{ + dev_info(adev->dev, "CZ IH registers\n"); + dev_info(adev->dev, " SRBM_STATUS=0x%08X\n", + RREG32(mmSRBM_STATUS)); + dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", + RREG32(mmSRBM_STATUS2)); + dev_info(adev->dev, " INTERRUPT_CNTL=0x%08X\n", + RREG32(mmINTERRUPT_CNTL)); + dev_info(adev->dev, " INTERRUPT_CNTL2=0x%08X\n", + RREG32(mmINTERRUPT_CNTL2)); + dev_info(adev->dev, " IH_CNTL=0x%08X\n", + RREG32(mmIH_CNTL)); + dev_info(adev->dev, " IH_RB_CNTL=0x%08X\n", + RREG32(mmIH_RB_CNTL)); + dev_info(adev->dev, " IH_RB_BASE=0x%08X\n", + RREG32(mmIH_RB_BASE)); + dev_info(adev->dev, " IH_RB_WPTR_ADDR_LO=0x%08X\n", + RREG32(mmIH_RB_WPTR_ADDR_LO)); + dev_info(adev->dev, " 
IH_RB_WPTR_ADDR_HI=0x%08X\n", + RREG32(mmIH_RB_WPTR_ADDR_HI)); + dev_info(adev->dev, " IH_RB_RPTR=0x%08X\n", + RREG32(mmIH_RB_RPTR)); + dev_info(adev->dev, " IH_RB_WPTR=0x%08X\n", + RREG32(mmIH_RB_WPTR)); +} + +static int cz_ih_soft_reset(struct amdgpu_device *adev) +{ + u32 srbm_soft_reset = 0; + u32 tmp = RREG32(mmSRBM_STATUS); + + if (tmp & SRBM_STATUS__IH_BUSY_MASK) + srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, + SOFT_RESET_IH, 1); + + if (srbm_soft_reset) { + cz_ih_print_status(adev); + + tmp = RREG32(mmSRBM_SOFT_RESET); + tmp |= srbm_soft_reset; + dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + udelay(50); + + tmp &= ~srbm_soft_reset; + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + /* Wait a little for things to settle down */ + udelay(50); + + cz_ih_print_status(adev); + } + + return 0; +} + +static int cz_ih_set_clockgating_state(struct amdgpu_device *adev, + enum amdgpu_clockgating_state state) +{ + // TODO + return 0; +} + +static int cz_ih_set_powergating_state(struct amdgpu_device *adev, + enum amdgpu_powergating_state state) +{ + // TODO + return 0; +} + +const struct amdgpu_ip_funcs cz_ih_ip_funcs = { + .early_init = cz_ih_early_init, + .late_init = NULL, + .sw_init = cz_ih_sw_init, + .sw_fini = cz_ih_sw_fini, + .hw_init = cz_ih_hw_init, + .hw_fini = cz_ih_hw_fini, + .suspend = cz_ih_suspend, + .resume = cz_ih_resume, + .is_idle = cz_ih_is_idle, + .wait_for_idle = cz_ih_wait_for_idle, + .soft_reset = cz_ih_soft_reset, + .print_status = cz_ih_print_status, + .set_clockgating_state = cz_ih_set_clockgating_state, + .set_powergating_state = cz_ih_set_powergating_state, +}; + +static const struct amdgpu_ih_funcs cz_ih_funcs = { + .get_wptr = cz_ih_get_wptr, + .decode_iv = cz_ih_decode_iv, + .set_rptr = cz_ih_set_rptr +}; + +static void cz_ih_set_interrupt_funcs(struct amdgpu_device *adev) +{ + if (adev->irq.ih_funcs == NULL) + adev->irq.ih_funcs = &cz_ih_funcs; +} + diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.h b/drivers/gpu/drm/amd/amdgpu/cz_ih.h new file mode 100644 index 000000000000..1bce136876ff --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.h @@ -0,0 +1,29 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __CZ_IH_H__ +#define __CZ_IH_H__ + +extern const struct amdgpu_ip_funcs cz_ih_ip_funcs; + +#endif /* __CZ_IH_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ppsmc.h b/drivers/gpu/drm/amd/amdgpu/cz_ppsmc.h new file mode 100644 index 000000000000..273616ab43db --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/cz_ppsmc.h @@ -0,0 +1,185 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef CZ_PP_SMC_H +#define CZ_PP_SMC_H + +#pragma pack(push, 1) + +/* Fan control algorithm:*/ +#define FDO_MODE_HARDWARE 0 +#define FDO_MODE_PIECE_WISE_LINEAR 1 + +enum FAN_CONTROL { + FAN_CONTROL_FUZZY, + FAN_CONTROL_TABLE +}; + +enum DPM_ARRAY { + DPM_ARRAY_HARD_MAX, + DPM_ARRAY_HARD_MIN, + DPM_ARRAY_SOFT_MAX, + DPM_ARRAY_SOFT_MIN +}; + +/* + * Return codes for driver to SMC communication. + * Leave these #define-s, enums might not be exactly 8-bits on the microcontroller. 
+ */ +#define PPSMC_Result_OK ((uint16_t)0x01) +#define PPSMC_Result_NoMore ((uint16_t)0x02) +#define PPSMC_Result_NotNow ((uint16_t)0x03) +#define PPSMC_Result_Failed ((uint16_t)0xFF) +#define PPSMC_Result_UnknownCmd ((uint16_t)0xFE) +#define PPSMC_Result_UnknownVT ((uint16_t)0xFD) + +#define PPSMC_isERROR(x) ((uint16_t)0x80 & (x)) + +/* + * Supported driver messages + */ +#define PPSMC_MSG_Test ((uint16_t) 0x1) +#define PPSMC_MSG_GetFeatureStatus ((uint16_t) 0x2) +#define PPSMC_MSG_EnableAllSmuFeatures ((uint16_t) 0x3) +#define PPSMC_MSG_DisableAllSmuFeatures ((uint16_t) 0x4) +#define PPSMC_MSG_OptimizeBattery ((uint16_t) 0x5) +#define PPSMC_MSG_MaximizePerf ((uint16_t) 0x6) +#define PPSMC_MSG_UVDPowerOFF ((uint16_t) 0x7) +#define PPSMC_MSG_UVDPowerON ((uint16_t) 0x8) +#define PPSMC_MSG_VCEPowerOFF ((uint16_t) 0x9) +#define PPSMC_MSG_VCEPowerON ((uint16_t) 0xA) +#define PPSMC_MSG_ACPPowerOFF ((uint16_t) 0xB) +#define PPSMC_MSG_ACPPowerON ((uint16_t) 0xC) +#define PPSMC_MSG_SDMAPowerOFF ((uint16_t) 0xD) +#define PPSMC_MSG_SDMAPowerON ((uint16_t) 0xE) +#define PPSMC_MSG_XDMAPowerOFF ((uint16_t) 0xF) +#define PPSMC_MSG_XDMAPowerON ((uint16_t) 0x10) +#define PPSMC_MSG_SetMinDeepSleepSclk ((uint16_t) 0x11) +#define PPSMC_MSG_SetSclkSoftMin ((uint16_t) 0x12) +#define PPSMC_MSG_SetSclkSoftMax ((uint16_t) 0x13) +#define PPSMC_MSG_SetSclkHardMin ((uint16_t) 0x14) +#define PPSMC_MSG_SetSclkHardMax ((uint16_t) 0x15) +#define PPSMC_MSG_SetLclkSoftMin ((uint16_t) 0x16) +#define PPSMC_MSG_SetLclkSoftMax ((uint16_t) 0x17) +#define PPSMC_MSG_SetLclkHardMin ((uint16_t) 0x18) +#define PPSMC_MSG_SetLclkHardMax ((uint16_t) 0x19) +#define PPSMC_MSG_SetUvdSoftMin ((uint16_t) 0x1A) +#define PPSMC_MSG_SetUvdSoftMax ((uint16_t) 0x1B) +#define PPSMC_MSG_SetUvdHardMin ((uint16_t) 0x1C) +#define PPSMC_MSG_SetUvdHardMax ((uint16_t) 0x1D) +#define PPSMC_MSG_SetEclkSoftMin ((uint16_t) 0x1E) +#define PPSMC_MSG_SetEclkSoftMax ((uint16_t) 0x1F) +#define PPSMC_MSG_SetEclkHardMin ((uint16_t) 0x20) +#define PPSMC_MSG_SetEclkHardMax ((uint16_t) 0x21) +#define PPSMC_MSG_SetAclkSoftMin ((uint16_t) 0x22) +#define PPSMC_MSG_SetAclkSoftMax ((uint16_t) 0x23) +#define PPSMC_MSG_SetAclkHardMin ((uint16_t) 0x24) +#define PPSMC_MSG_SetAclkHardMax ((uint16_t) 0x25) +#define PPSMC_MSG_SetNclkSoftMin ((uint16_t) 0x26) +#define PPSMC_MSG_SetNclkSoftMax ((uint16_t) 0x27) +#define PPSMC_MSG_SetNclkHardMin ((uint16_t) 0x28) +#define PPSMC_MSG_SetNclkHardMax ((uint16_t) 0x29) +#define PPSMC_MSG_SetPstateSoftMin ((uint16_t) 0x2A) +#define PPSMC_MSG_SetPstateSoftMax ((uint16_t) 0x2B) +#define PPSMC_MSG_SetPstateHardMin ((uint16_t) 0x2C) +#define PPSMC_MSG_SetPstateHardMax ((uint16_t) 0x2D) +#define PPSMC_MSG_DisableLowMemoryPstate ((uint16_t) 0x2E) +#define PPSMC_MSG_EnableLowMemoryPstate ((uint16_t) 0x2F) +#define PPSMC_MSG_UcodeAddressLow ((uint16_t) 0x30) +#define PPSMC_MSG_UcodeAddressHigh ((uint16_t) 0x31) +#define PPSMC_MSG_UcodeLoadStatus ((uint16_t) 0x32) +#define PPSMC_MSG_DriverDramAddrHi ((uint16_t) 0x33) +#define PPSMC_MSG_DriverDramAddrLo ((uint16_t) 0x34) +#define PPSMC_MSG_CondExecDramAddrHi ((uint16_t) 0x35) +#define PPSMC_MSG_CondExecDramAddrLo ((uint16_t) 0x36) +#define PPSMC_MSG_LoadUcodes ((uint16_t) 0x37) +#define PPSMC_MSG_DriverResetMode ((uint16_t) 0x38) +#define PPSMC_MSG_PowerStateNotify ((uint16_t) 0x39) +#define PPSMC_MSG_SetDisplayPhyConfig ((uint16_t) 0x3A) +#define PPSMC_MSG_GetMaxSclkLevel ((uint16_t) 0x3B) +#define PPSMC_MSG_GetMaxLclkLevel ((uint16_t) 0x3C) +#define PPSMC_MSG_GetMaxUvdLevel ((uint16_t) 0x3D) 
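+/* Messages are posted to the SMU through the SRBM2P mailbox registers
+ * (see cz_send_msg_to_smc_with_parameter() in cz_smc.c); the Get* queries
+ * above and below return their result through cz_get_argument(). */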
+#define PPSMC_MSG_GetMaxEclkLevel ((uint16_t) 0x3E) +#define PPSMC_MSG_GetMaxAclkLevel ((uint16_t) 0x3F) +#define PPSMC_MSG_GetMaxNclkLevel ((uint16_t) 0x40) +#define PPSMC_MSG_GetMaxPstate ((uint16_t) 0x41) +#define PPSMC_MSG_DramAddrHiVirtual ((uint16_t) 0x42) +#define PPSMC_MSG_DramAddrLoVirtual ((uint16_t) 0x43) +#define PPSMC_MSG_DramAddrHiPhysical ((uint16_t) 0x44) +#define PPSMC_MSG_DramAddrLoPhysical ((uint16_t) 0x45) +#define PPSMC_MSG_DramBufferSize ((uint16_t) 0x46) +#define PPSMC_MSG_SetMmPwrLogDramAddrHi ((uint16_t) 0x47) +#define PPSMC_MSG_SetMmPwrLogDramAddrLo ((uint16_t) 0x48) +#define PPSMC_MSG_SetClkTableAddrHi ((uint16_t) 0x49) +#define PPSMC_MSG_SetClkTableAddrLo ((uint16_t) 0x4A) +#define PPSMC_MSG_GetConservativePowerLimit ((uint16_t) 0x4B) + +#define PPSMC_MSG_InitJobs ((uint16_t) 0x252) +#define PPSMC_MSG_ExecuteJob ((uint16_t) 0x254) + +#define PPSMC_MSG_NBDPM_Enable ((uint16_t) 0x140) +#define PPSMC_MSG_NBDPM_Disable ((uint16_t) 0x141) + +#define PPSMC_MSG_DPM_FPS_Mode ((uint16_t) 0x15d) +#define PPSMC_MSG_DPM_Activity_Mode ((uint16_t) 0x15e) + +#define PPSMC_MSG_PmStatusLogStart ((uint16_t) 0x170) +#define PPSMC_MSG_PmStatusLogSample ((uint16_t) 0x171) + +#define PPSMC_MSG_AllowLowSclkInterrupt ((uint16_t) 0x184) +#define PPSMC_MSG_MmPowerMonitorStart ((uint16_t) 0x18F) +#define PPSMC_MSG_MmPowerMonitorStop ((uint16_t) 0x190) +#define PPSMC_MSG_MmPowerMonitorRestart ((uint16_t) 0x191) + +#define PPSMC_MSG_SetClockGateMask ((uint16_t) 0x260) +#define PPSMC_MSG_SetFpsThresholdLo ((uint16_t) 0x264) +#define PPSMC_MSG_SetFpsThresholdHi ((uint16_t) 0x265) +#define PPSMC_MSG_SetLowSclkIntrThreshold ((uint16_t) 0x266) + +#define PPSMC_MSG_ClkTableXferToDram ((uint16_t) 0x267) +#define PPSMC_MSG_ClkTableXferToSmu ((uint16_t) 0x268) +#define PPSMC_MSG_GetAverageGraphicsActivity ((uint16_t) 0x269) +#define PPSMC_MSG_GetAverageGioActivity ((uint16_t) 0x26A) +#define PPSMC_MSG_SetLoggerBufferSize ((uint16_t) 0x26B) +#define PPSMC_MSG_SetLoggerAddressHigh ((uint16_t) 0x26C) +#define PPSMC_MSG_SetLoggerAddressLow ((uint16_t) 0x26D) +#define PPSMC_MSG_SetWatermarkFrequency ((uint16_t) 0x26E) + +/* REMOVE LATER*/ +#define PPSMC_MSG_DPM_ForceState ((uint16_t) 0x104) + +/* Feature Enable Masks*/ +#define NB_DPM_MASK 0x00000800 +#define VDDGFX_MASK 0x00800000 +#define VCE_DPM_MASK 0x00400000 +#define ACP_DPM_MASK 0x00040000 +#define UVD_DPM_MASK 0x00010000 +#define GFX_CU_PG_MASK 0x00004000 +#define SCLK_DPM_MASK 0x00080000 + +#if !defined(SMC_MICROCODE) +#pragma pack(pop) + +#endif + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/cz_smc.c b/drivers/gpu/drm/amd/amdgpu/cz_smc.c new file mode 100644 index 000000000000..a72ffc7d6c26 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/cz_smc.c @@ -0,0 +1,962 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include +#include "drmP.h" +#include "amdgpu.h" +#include "smu8.h" +#include "smu8_fusion.h" +#include "cz_ppsmc.h" +#include "cz_smumgr.h" +#include "smu_ucode_xfer_cz.h" +#include "amdgpu_ucode.h" + +#include "smu/smu_8_0_d.h" +#include "smu/smu_8_0_sh_mask.h" +#include "gca/gfx_8_0_d.h" +#include "gca/gfx_8_0_sh_mask.h" + +uint32_t cz_get_argument(struct amdgpu_device *adev) +{ + return RREG32(mmSMU_MP1_SRBM2P_ARG_0); +} + +static struct cz_smu_private_data *cz_smu_get_priv(struct amdgpu_device *adev) +{ + struct cz_smu_private_data *priv = + (struct cz_smu_private_data *)(adev->smu.priv); + + return priv; +} + +int cz_send_msg_to_smc_async(struct amdgpu_device *adev, u16 msg) +{ + int i; + u32 content = 0, tmp; + + for (i = 0; i < adev->usec_timeout; i++) { + tmp = REG_GET_FIELD(RREG32(mmSMU_MP1_SRBM2P_RESP_0), + SMU_MP1_SRBM2P_RESP_0, CONTENT); + if (content != tmp) + break; + udelay(1); + } + + /* timeout means wrong logic*/ + if (i == adev->usec_timeout) + return -EINVAL; + + WREG32(mmSMU_MP1_SRBM2P_RESP_0, 0); + WREG32(mmSMU_MP1_SRBM2P_MSG_0, msg); + + return 0; +} + +int cz_send_msg_to_smc(struct amdgpu_device *adev, u16 msg) +{ + int i; + u32 content = 0, tmp = 0; + + if (cz_send_msg_to_smc_async(adev, msg)) + return -EINVAL; + + for (i = 0; i < adev->usec_timeout; i++) { + tmp = REG_GET_FIELD(RREG32(mmSMU_MP1_SRBM2P_RESP_0), + SMU_MP1_SRBM2P_RESP_0, CONTENT); + if (content != tmp) + break; + udelay(1); + } + + /* timeout means wrong logic*/ + if (i == adev->usec_timeout) + return -EINVAL; + + if (PPSMC_Result_OK != tmp) { + dev_err(adev->dev, "SMC Failed to send Message.\n"); + return -EINVAL; + } + + return 0; +} + +int cz_send_msg_to_smc_with_parameter_async(struct amdgpu_device *adev, + u16 msg, u32 parameter) +{ + WREG32(mmSMU_MP1_SRBM2P_ARG_0, parameter); + return cz_send_msg_to_smc_async(adev, msg); +} + +int cz_send_msg_to_smc_with_parameter(struct amdgpu_device *adev, + u16 msg, u32 parameter) +{ + WREG32(mmSMU_MP1_SRBM2P_ARG_0, parameter); + return cz_send_msg_to_smc(adev, msg); +} + +static int cz_set_smc_sram_address(struct amdgpu_device *adev, + u32 smc_address, u32 limit) +{ + if (smc_address & 3) + return -EINVAL; + if ((smc_address + 3) > limit) + return -EINVAL; + + WREG32(mmMP0PUB_IND_INDEX_0, SMN_MP1_SRAM_START_ADDR + smc_address); + + return 0; +} + +int cz_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address, + u32 *value, u32 limit) +{ + int ret; + + ret = cz_set_smc_sram_address(adev, smc_address, limit); + if (ret) + return ret; + + *value = RREG32(mmMP0PUB_IND_DATA_0); + + return 0; +} + +int cz_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address, + u32 value, u32 limit) +{ + int ret; + + ret = cz_set_smc_sram_address(adev, smc_address, limit); + if (ret) + return ret; + + WREG32(mmMP0PUB_IND_DATA_0, value); + + return 0; +} + +static int cz_smu_request_load_fw(struct amdgpu_device *adev) +{ + struct cz_smu_private_data *priv = cz_smu_get_priv(adev); + + uint32_t smc_addr = SMU8_FIRMWARE_HEADER_LOCATION + + offsetof(struct 
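+	/* UcodeLoadStatus lives in the SMU8 firmware header in SMC SRAM; it is
+	 * cleared here and polled later by cz_smu_check_fw_load_finish() */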
SMU8_Firmware_Header, UcodeLoadStatus); + + cz_write_smc_sram_dword(adev, smc_addr, 0, smc_addr + 4); + + /*prepare toc buffers*/ + cz_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_DriverDramAddrHi, + priv->toc_buffer.mc_addr_high); + cz_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_DriverDramAddrLo, + priv->toc_buffer.mc_addr_low); + cz_send_msg_to_smc(adev, PPSMC_MSG_InitJobs); + + /*execute jobs*/ + cz_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_ExecuteJob, + priv->toc_entry_aram); + + cz_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_ExecuteJob, + priv->toc_entry_power_profiling_index); + + cz_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_ExecuteJob, + priv->toc_entry_initialize_index); + + return 0; +} + +/* + *Check if the FW has been loaded, SMU will not return if loading + *has not finished. + */ +static int cz_smu_check_fw_load_finish(struct amdgpu_device *adev, + uint32_t fw_mask) +{ + int i; + uint32_t index = SMN_MP1_SRAM_START_ADDR + + SMU8_FIRMWARE_HEADER_LOCATION + + offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus); + + WREG32(mmMP0PUB_IND_INDEX, index); + + for (i = 0; i < adev->usec_timeout; i++) { + if (fw_mask == (RREG32(mmMP0PUB_IND_DATA) & fw_mask)) + break; + udelay(1); + } + + if (i >= adev->usec_timeout) { + dev_err(adev->dev, + "SMU check loaded firmware failed, expecting 0x%x, getting 0x%x", + fw_mask, RREG32(mmMP0PUB_IND_DATA)); + return -EINVAL; + } + + return 0; +} + +/* + * interfaces for different ip blocks to check firmware loading status + * 0 for success otherwise failed + */ +static int cz_smu_check_finished(struct amdgpu_device *adev, + enum AMDGPU_UCODE_ID id) +{ + switch (id) { + case AMDGPU_UCODE_ID_SDMA0: + if (adev->smu.fw_flags & AMDGPU_SDMA0_UCODE_LOADED) + return 0; + break; + case AMDGPU_UCODE_ID_SDMA1: + if (adev->smu.fw_flags & AMDGPU_SDMA1_UCODE_LOADED) + return 0; + break; + case AMDGPU_UCODE_ID_CP_CE: + if (adev->smu.fw_flags & AMDGPU_CPCE_UCODE_LOADED) + return 0; + break; + case AMDGPU_UCODE_ID_CP_PFP: + if (adev->smu.fw_flags & AMDGPU_CPPFP_UCODE_LOADED) + return 0; + case AMDGPU_UCODE_ID_CP_ME: + if (adev->smu.fw_flags & AMDGPU_CPME_UCODE_LOADED) + return 0; + break; + case AMDGPU_UCODE_ID_CP_MEC1: + if (adev->smu.fw_flags & AMDGPU_CPMEC1_UCODE_LOADED) + return 0; + break; + case AMDGPU_UCODE_ID_CP_MEC2: + if (adev->smu.fw_flags & AMDGPU_CPMEC2_UCODE_LOADED) + return 0; + break; + case AMDGPU_UCODE_ID_RLC_G: + if (adev->smu.fw_flags & AMDGPU_CPRLC_UCODE_LOADED) + return 0; + break; + case AMDGPU_UCODE_ID_MAXIMUM: + default: + break; + } + + return 1; +} + +static int cz_load_mec_firmware(struct amdgpu_device *adev) +{ + struct amdgpu_firmware_info *ucode = + &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1]; + uint32_t reg_data; + uint32_t tmp; + + if (ucode->fw == NULL) + return -EINVAL; + + /* Disable MEC parsing/prefetching */ + tmp = RREG32(mmCP_MEC_CNTL); + tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1); + tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1); + WREG32(mmCP_MEC_CNTL, tmp); + + tmp = RREG32(mmCP_CPC_IC_BASE_CNTL); + tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0); + tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0); + tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); + tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1); + WREG32(mmCP_CPC_IC_BASE_CNTL, tmp); + + reg_data = lower_32_bits(ucode->mc_addr) & + REG_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO); + WREG32(mmCP_CPC_IC_BASE_LO, reg_data); + + reg_data = upper_32_bits(ucode->mc_addr) & + 
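+	/* upper address bits of the MEC ucode go into CP_CPC_IC_BASE_HI
+	 * (the low bits were programmed just above) */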
REG_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI); + WREG32(mmCP_CPC_IC_BASE_HI, reg_data); + + return 0; +} + +int cz_smu_start(struct amdgpu_device *adev) +{ + int ret = 0; + + uint32_t fw_to_check = UCODE_ID_RLC_G_MASK | + UCODE_ID_SDMA0_MASK | + UCODE_ID_SDMA1_MASK | + UCODE_ID_CP_CE_MASK | + UCODE_ID_CP_ME_MASK | + UCODE_ID_CP_PFP_MASK | + UCODE_ID_CP_MEC_JT1_MASK | + UCODE_ID_CP_MEC_JT2_MASK; + + cz_smu_request_load_fw(adev); + ret = cz_smu_check_fw_load_finish(adev, fw_to_check); + if (ret) + return ret; + + /* manually load MEC firmware for CZ */ + if (adev->asic_type == CHIP_CARRIZO) { + ret = cz_load_mec_firmware(adev); + if (ret) { + dev_err(adev->dev, "(%d) Mec Firmware load failed\n", ret); + return ret; + } + } + + /* setup fw load flag */ + adev->smu.fw_flags = AMDGPU_SDMA0_UCODE_LOADED | + AMDGPU_SDMA1_UCODE_LOADED | + AMDGPU_CPCE_UCODE_LOADED | + AMDGPU_CPPFP_UCODE_LOADED | + AMDGPU_CPME_UCODE_LOADED | + AMDGPU_CPMEC1_UCODE_LOADED | + AMDGPU_CPMEC2_UCODE_LOADED | + AMDGPU_CPRLC_UCODE_LOADED; + + return ret; +} + +static uint32_t cz_convert_fw_type(uint32_t fw_type) +{ + enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM; + + switch (fw_type) { + case UCODE_ID_SDMA0: + result = AMDGPU_UCODE_ID_SDMA0; + break; + case UCODE_ID_SDMA1: + result = AMDGPU_UCODE_ID_SDMA1; + break; + case UCODE_ID_CP_CE: + result = AMDGPU_UCODE_ID_CP_CE; + break; + case UCODE_ID_CP_PFP: + result = AMDGPU_UCODE_ID_CP_PFP; + break; + case UCODE_ID_CP_ME: + result = AMDGPU_UCODE_ID_CP_ME; + break; + case UCODE_ID_CP_MEC_JT1: + case UCODE_ID_CP_MEC_JT2: + result = AMDGPU_UCODE_ID_CP_MEC1; + break; + case UCODE_ID_RLC_G: + result = AMDGPU_UCODE_ID_RLC_G; + break; + default: + DRM_ERROR("UCode type is out of range!"); + } + + return result; +} + +static uint8_t cz_smu_translate_firmware_enum_to_arg( + enum cz_scratch_entry firmware_enum) +{ + uint8_t ret = 0; + + switch (firmware_enum) { + case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0: + ret = UCODE_ID_SDMA0; + break; + case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1: + ret = UCODE_ID_SDMA1; + break; + case CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE: + ret = UCODE_ID_CP_CE; + break; + case CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP: + ret = UCODE_ID_CP_PFP; + break; + case CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME: + ret = UCODE_ID_CP_ME; + break; + case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1: + ret = UCODE_ID_CP_MEC_JT1; + break; + case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2: + ret = UCODE_ID_CP_MEC_JT2; + break; + case CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG: + ret = UCODE_ID_GMCON_RENG; + break; + case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G: + ret = UCODE_ID_RLC_G; + break; + case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH: + ret = UCODE_ID_RLC_SCRATCH; + break; + case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM: + ret = UCODE_ID_RLC_SRM_ARAM; + break; + case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM: + ret = UCODE_ID_RLC_SRM_DRAM; + break; + case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM: + ret = UCODE_ID_DMCU_ERAM; + break; + case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM: + ret = UCODE_ID_DMCU_IRAM; + break; + case CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING: + ret = TASK_ARG_INIT_MM_PWR_LOG; + break; + case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT: + case CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING: + case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS: + case CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT: + case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START: + case CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS: + ret = TASK_ARG_REG_MMIO; + break; + case CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE: + ret = TASK_ARG_INIT_CLK_TABLE; + break; + } + + return ret; +} + +static int 
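+/* Record one firmware image in a cz_buffer_entry: resolve the scratch enum
+ * to an amdgpu ucode ID, then store the image's GPU address and size.  The
+ * MEC JT1/JT2 entries point at the jump table inside the MEC image. */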
cz_smu_populate_single_firmware_entry(struct amdgpu_device *adev, + enum cz_scratch_entry firmware_enum, + struct cz_buffer_entry *entry) +{ + uint64_t gpu_addr; + uint32_t data_size; + uint8_t ucode_id = cz_smu_translate_firmware_enum_to_arg(firmware_enum); + enum AMDGPU_UCODE_ID id = cz_convert_fw_type(ucode_id); + struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id]; + const struct gfx_firmware_header_v1_0 *header; + + if (ucode->fw == NULL) + return -EINVAL; + + gpu_addr = ucode->mc_addr; + header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data; + data_size = le32_to_cpu(header->header.ucode_size_bytes); + + if ((firmware_enum == CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1) || + (firmware_enum == CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2)) { + gpu_addr += le32_to_cpu(header->jt_offset) << 2; + data_size = le32_to_cpu(header->jt_size) << 2; + } + + entry->mc_addr_low = lower_32_bits(gpu_addr); + entry->mc_addr_high = upper_32_bits(gpu_addr); + entry->data_size = data_size; + entry->firmware_ID = firmware_enum; + + return 0; +} + +static int cz_smu_populate_single_scratch_entry(struct amdgpu_device *adev, + enum cz_scratch_entry scratch_type, + uint32_t size_in_byte, + struct cz_buffer_entry *entry) +{ + struct cz_smu_private_data *priv = cz_smu_get_priv(adev); + uint64_t mc_addr = (((uint64_t) priv->smu_buffer.mc_addr_high) << 32) | + priv->smu_buffer.mc_addr_low; + mc_addr += size_in_byte; + + priv->smu_buffer_used_bytes += size_in_byte; + entry->data_size = size_in_byte; + entry->kaddr = priv->smu_buffer.kaddr + priv->smu_buffer_used_bytes; + entry->mc_addr_low = lower_32_bits(mc_addr); + entry->mc_addr_high = upper_32_bits(mc_addr); + entry->firmware_ID = scratch_type; + + return 0; +} + +static int cz_smu_populate_single_ucode_load_task(struct amdgpu_device *adev, + enum cz_scratch_entry firmware_enum, + bool is_last) +{ + uint8_t i; + struct cz_smu_private_data *priv = cz_smu_get_priv(adev); + struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr; + struct SMU_Task *task = &toc->tasks[priv->toc_entry_used_count++]; + + task->type = TASK_TYPE_UCODE_LOAD; + task->arg = cz_smu_translate_firmware_enum_to_arg(firmware_enum); + task->next = is_last ? END_OF_TASK_LIST : priv->toc_entry_used_count; + + for (i = 0; i < priv->driver_buffer_length; i++) + if (priv->driver_buffer[i].firmware_ID == firmware_enum) + break; + + if (i >= priv->driver_buffer_length) { + dev_err(adev->dev, "Invalid Firmware Type\n"); + return -EINVAL; + } + + task->addr.low = priv->driver_buffer[i].mc_addr_low; + task->addr.high = priv->driver_buffer[i].mc_addr_high; + task->size_bytes = priv->driver_buffer[i].data_size; + + return 0; +} + +static int cz_smu_populate_single_scratch_task(struct amdgpu_device *adev, + enum cz_scratch_entry firmware_enum, + uint8_t type, bool is_last) +{ + uint8_t i; + struct cz_smu_private_data *priv = cz_smu_get_priv(adev); + struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr; + struct SMU_Task *task = &toc->tasks[priv->toc_entry_used_count++]; + + task->type = type; + task->arg = cz_smu_translate_firmware_enum_to_arg(firmware_enum); + task->next = is_last ? 
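+	/* the last task terminates the list; otherwise chain to the next TOC
+	 * slot (toc_entry_used_count was advanced when this task was taken) */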
END_OF_TASK_LIST : priv->toc_entry_used_count; + + for (i = 0; i < priv->scratch_buffer_length; i++) + if (priv->scratch_buffer[i].firmware_ID == firmware_enum) + break; + + if (i >= priv->scratch_buffer_length) { + dev_err(adev->dev, "Invalid Firmware Type\n"); + return -EINVAL; + } + + task->addr.low = priv->scratch_buffer[i].mc_addr_low; + task->addr.high = priv->scratch_buffer[i].mc_addr_high; + task->size_bytes = priv->scratch_buffer[i].data_size; + + if (CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS == firmware_enum) { + struct cz_ih_meta_data *pIHReg_restore = + (struct cz_ih_meta_data *)priv->scratch_buffer[i].kaddr; + pIHReg_restore->command = + METADATA_CMD_MODE0 | METADATA_PERFORM_ON_LOAD; + } + + return 0; +} + +static int cz_smu_construct_toc_for_rlc_aram_save(struct amdgpu_device *adev) +{ + struct cz_smu_private_data *priv = cz_smu_get_priv(adev); + priv->toc_entry_aram = priv->toc_entry_used_count; + cz_smu_populate_single_scratch_task(adev, + CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM, + TASK_TYPE_UCODE_SAVE, true); + + return 0; +} + +static int cz_smu_construct_toc_for_vddgfx_enter(struct amdgpu_device *adev) +{ + struct cz_smu_private_data *priv = cz_smu_get_priv(adev); + struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr; + + toc->JobList[JOB_GFX_SAVE] = (uint8_t)priv->toc_entry_used_count; + cz_smu_populate_single_scratch_task(adev, + CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH, + TASK_TYPE_UCODE_SAVE, false); + cz_smu_populate_single_scratch_task(adev, + CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM, + TASK_TYPE_UCODE_SAVE, true); + + return 0; +} + +static int cz_smu_construct_toc_for_vddgfx_exit(struct amdgpu_device *adev) +{ + struct cz_smu_private_data *priv = cz_smu_get_priv(adev); + struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr; + + toc->JobList[JOB_GFX_RESTORE] = (uint8_t)priv->toc_entry_used_count; + + /* populate ucode */ + if (adev->firmware.smu_load) { + cz_smu_populate_single_ucode_load_task(adev, + CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false); + cz_smu_populate_single_ucode_load_task(adev, + CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false); + cz_smu_populate_single_ucode_load_task(adev, + CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false); + cz_smu_populate_single_ucode_load_task(adev, + CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false); + cz_smu_populate_single_ucode_load_task(adev, + CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false); + cz_smu_populate_single_ucode_load_task(adev, + CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false); + } + + /* populate scratch */ + cz_smu_populate_single_scratch_task(adev, + CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH, + TASK_TYPE_UCODE_LOAD, false); + cz_smu_populate_single_scratch_task(adev, + CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM, + TASK_TYPE_UCODE_LOAD, false); + cz_smu_populate_single_scratch_task(adev, + CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM, + TASK_TYPE_UCODE_LOAD, true); + + return 0; +} + +static int cz_smu_construct_toc_for_power_profiling(struct amdgpu_device *adev) +{ + struct cz_smu_private_data *priv = cz_smu_get_priv(adev); + + priv->toc_entry_power_profiling_index = priv->toc_entry_used_count; + + cz_smu_populate_single_scratch_task(adev, + CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING, + TASK_TYPE_INITIALIZE, true); + return 0; +} + +static int cz_smu_construct_toc_for_bootup(struct amdgpu_device *adev) +{ + struct cz_smu_private_data *priv = cz_smu_get_priv(adev); + + priv->toc_entry_initialize_index = priv->toc_entry_used_count; + + if (adev->firmware.smu_load) { + cz_smu_populate_single_ucode_load_task(adev, + CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false); + 
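+	/* bootup loads SDMA0/1, the CP front ends (CE/PFP/ME and the MEC jump
+	 * tables) and finally RLC_G, which is flagged as the last task */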
cz_smu_populate_single_ucode_load_task(adev, + CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false); + cz_smu_populate_single_ucode_load_task(adev, + CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false); + cz_smu_populate_single_ucode_load_task(adev, + CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false); + cz_smu_populate_single_ucode_load_task(adev, + CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false); + cz_smu_populate_single_ucode_load_task(adev, + CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false); + cz_smu_populate_single_ucode_load_task(adev, + CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false); + cz_smu_populate_single_ucode_load_task(adev, + CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true); + } + + return 0; +} + +static int cz_smu_construct_toc_for_clock_table(struct amdgpu_device *adev) +{ + struct cz_smu_private_data *priv = cz_smu_get_priv(adev); + + priv->toc_entry_clock_table = priv->toc_entry_used_count; + + cz_smu_populate_single_scratch_task(adev, + CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE, + TASK_TYPE_INITIALIZE, true); + + return 0; +} + +static int cz_smu_initialize_toc_empty_job_list(struct amdgpu_device *adev) +{ + int i; + struct cz_smu_private_data *priv = cz_smu_get_priv(adev); + struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr; + + for (i = 0; i < NUM_JOBLIST_ENTRIES; i++) + toc->JobList[i] = (uint8_t)IGNORE_JOB; + + return 0; +} + +/* + * cz smu uninitialization + */ +int cz_smu_fini(struct amdgpu_device *adev) +{ + amdgpu_bo_unref(&adev->smu.toc_buf); + amdgpu_bo_unref(&adev->smu.smu_buf); + kfree(adev->smu.priv); + adev->smu.priv = NULL; + if (adev->firmware.smu_load) + amdgpu_ucode_fini_bo(adev); + + return 0; +} + +int cz_smu_download_pptable(struct amdgpu_device *adev, void **table) +{ + uint8_t i; + struct cz_smu_private_data *priv = cz_smu_get_priv(adev); + + for (i = 0; i < priv->scratch_buffer_length; i++) + if (priv->scratch_buffer[i].firmware_ID == + CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE) + break; + + if (i >= priv->scratch_buffer_length) { + dev_err(adev->dev, "Invalid Scratch Type\n"); + return -EINVAL; + } + + *table = (struct SMU8_Fusion_ClkTable *)priv->scratch_buffer[i].kaddr; + + /* prepare buffer for pptable */ + cz_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_SetClkTableAddrHi, + priv->scratch_buffer[i].mc_addr_high); + cz_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_SetClkTableAddrLo, + priv->scratch_buffer[i].mc_addr_low); + cz_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_ExecuteJob, + priv->toc_entry_clock_table); + + /* actual downloading */ + cz_send_msg_to_smc(adev, PPSMC_MSG_ClkTableXferToDram); + + return 0; +} + +int cz_smu_upload_pptable(struct amdgpu_device *adev) +{ + uint8_t i; + struct cz_smu_private_data *priv = cz_smu_get_priv(adev); + + for (i = 0; i < priv->scratch_buffer_length; i++) + if (priv->scratch_buffer[i].firmware_ID == + CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE) + break; + + if (i >= priv->scratch_buffer_length) { + dev_err(adev->dev, "Invalid Scratch Type\n"); + return -EINVAL; + } + + /* prepare SMU */ + cz_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_SetClkTableAddrHi, + priv->scratch_buffer[i].mc_addr_high); + cz_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_SetClkTableAddrLo, + priv->scratch_buffer[i].mc_addr_low); + cz_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_ExecuteJob, + priv->toc_entry_clock_table); + + /* actual uploading */ + cz_send_msg_to_smc(adev, PPSMC_MSG_ClkTableXferToSmu); + + return 0; +} + +/* + * cz smumgr functions initialization + */ +static const struct amdgpu_smumgr_funcs cz_smumgr_funcs = { + .check_fw_load_finish = 
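+	/* only the load-completion check is hooked up here; the actual firmware
+	 * loading is driven through the TOC jobs in cz_smu_start() */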
cz_smu_check_finished, + .request_smu_load_fw = NULL, + .request_smu_specific_fw = NULL, +}; + +/* + * cz smu initialization + */ +int cz_smu_init(struct amdgpu_device *adev) +{ + int ret = -EINVAL; + uint64_t mc_addr = 0; + struct amdgpu_bo **toc_buf = &adev->smu.toc_buf; + struct amdgpu_bo **smu_buf = &adev->smu.smu_buf; + void *toc_buf_ptr = NULL; + void *smu_buf_ptr = NULL; + + struct cz_smu_private_data *priv = + kzalloc(sizeof(struct cz_smu_private_data), GFP_KERNEL); + if (priv == NULL) + return -ENOMEM; + + /* allocate firmware buffers */ + if (adev->firmware.smu_load) + amdgpu_ucode_init_bo(adev); + + adev->smu.priv = priv; + adev->smu.fw_flags = 0; + priv->toc_buffer.data_size = 4096; + + priv->smu_buffer.data_size = + ALIGN(UCODE_ID_RLC_SCRATCH_SIZE_BYTE, 32) + + ALIGN(UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, 32) + + ALIGN(UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, 32) + + ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) + + ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32); + + /* prepare toc buffer and smu buffer: + * 1. create amdgpu_bo for toc buffer and smu buffer + * 2. pin mc address + * 3. map kernel virtual address + */ + ret = amdgpu_bo_create(adev, priv->toc_buffer.data_size, PAGE_SIZE, + true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, toc_buf); + + if (ret) { + dev_err(adev->dev, "(%d) SMC TOC buffer allocation failed\n", ret); + return ret; + } + + ret = amdgpu_bo_create(adev, priv->smu_buffer.data_size, PAGE_SIZE, + true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, smu_buf); + + if (ret) { + dev_err(adev->dev, "(%d) SMC Internal buffer allocation failed\n", ret); + return ret; + } + + /* toc buffer reserve/pin/map */ + ret = amdgpu_bo_reserve(adev->smu.toc_buf, false); + if (ret) { + amdgpu_bo_unref(&adev->smu.toc_buf); + dev_err(adev->dev, "(%d) SMC TOC buffer reserve failed\n", ret); + return ret; + } + + ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_GTT, &mc_addr); + if (ret) { + amdgpu_bo_unreserve(adev->smu.toc_buf); + amdgpu_bo_unref(&adev->smu.toc_buf); + dev_err(adev->dev, "(%d) SMC TOC buffer pin failed\n", ret); + return ret; + } + + ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr); + if (ret) + goto smu_init_failed; + + amdgpu_bo_unreserve(adev->smu.toc_buf); + + priv->toc_buffer.mc_addr_low = lower_32_bits(mc_addr); + priv->toc_buffer.mc_addr_high = upper_32_bits(mc_addr); + priv->toc_buffer.kaddr = toc_buf_ptr; + + /* smu buffer reserve/pin/map */ + ret = amdgpu_bo_reserve(adev->smu.smu_buf, false); + if (ret) { + amdgpu_bo_unref(&adev->smu.smu_buf); + dev_err(adev->dev, "(%d) SMC Internal buffer reserve failed\n", ret); + return ret; + } + + ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_GTT, &mc_addr); + if (ret) { + amdgpu_bo_unreserve(adev->smu.smu_buf); + amdgpu_bo_unref(&adev->smu.smu_buf); + dev_err(adev->dev, "(%d) SMC Internal buffer pin failed\n", ret); + return ret; + } + + ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr); + if (ret) + goto smu_init_failed; + + amdgpu_bo_unreserve(adev->smu.smu_buf); + + priv->smu_buffer.mc_addr_low = lower_32_bits(mc_addr); + priv->smu_buffer.mc_addr_high = upper_32_bits(mc_addr); + priv->smu_buffer.kaddr = smu_buf_ptr; + + if (adev->firmware.smu_load) { + if (cz_smu_populate_single_firmware_entry(adev, + CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, + &priv->driver_buffer[priv->driver_buffer_length++])) + goto smu_init_failed; + if (cz_smu_populate_single_firmware_entry(adev, + CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, + &priv->driver_buffer[priv->driver_buffer_length++])) + goto smu_init_failed; + if (cz_smu_populate_single_firmware_entry(adev, + 
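+		/* register the remaining front-end firmwares (CP CE/PFP/ME, the MEC
+		 * jump tables and RLC_G) in driver_buffer for the TOC load tasks */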
CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, + &priv->driver_buffer[priv->driver_buffer_length++])) + goto smu_init_failed; + if (cz_smu_populate_single_firmware_entry(adev, + CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, + &priv->driver_buffer[priv->driver_buffer_length++])) + goto smu_init_failed; + if (cz_smu_populate_single_firmware_entry(adev, + CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, + &priv->driver_buffer[priv->driver_buffer_length++])) + goto smu_init_failed; + if (cz_smu_populate_single_firmware_entry(adev, + CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, + &priv->driver_buffer[priv->driver_buffer_length++])) + goto smu_init_failed; + if (cz_smu_populate_single_firmware_entry(adev, + CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, + &priv->driver_buffer[priv->driver_buffer_length++])) + goto smu_init_failed; + if (cz_smu_populate_single_firmware_entry(adev, + CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, + &priv->driver_buffer[priv->driver_buffer_length++])) + goto smu_init_failed; + } + + if (cz_smu_populate_single_scratch_entry(adev, + CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH, + UCODE_ID_RLC_SCRATCH_SIZE_BYTE, + &priv->scratch_buffer[priv->scratch_buffer_length++])) + goto smu_init_failed; + if (cz_smu_populate_single_scratch_entry(adev, + CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM, + UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, + &priv->scratch_buffer[priv->scratch_buffer_length++])) + goto smu_init_failed; + if (cz_smu_populate_single_scratch_entry(adev, + CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM, + UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, + &priv->scratch_buffer[priv->scratch_buffer_length++])) + goto smu_init_failed; + if (cz_smu_populate_single_scratch_entry(adev, + CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING, + sizeof(struct SMU8_MultimediaPowerLogData), + &priv->scratch_buffer[priv->scratch_buffer_length++])) + goto smu_init_failed; + if (cz_smu_populate_single_scratch_entry(adev, + CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE, + sizeof(struct SMU8_Fusion_ClkTable), + &priv->scratch_buffer[priv->scratch_buffer_length++])) + goto smu_init_failed; + + cz_smu_initialize_toc_empty_job_list(adev); + cz_smu_construct_toc_for_rlc_aram_save(adev); + cz_smu_construct_toc_for_vddgfx_enter(adev); + cz_smu_construct_toc_for_vddgfx_exit(adev); + cz_smu_construct_toc_for_power_profiling(adev); + cz_smu_construct_toc_for_bootup(adev); + cz_smu_construct_toc_for_clock_table(adev); + /* init the smumgr functions */ + adev->smu.smumgr_funcs = &cz_smumgr_funcs; + + return 0; + +smu_init_failed: + amdgpu_bo_unref(toc_buf); + amdgpu_bo_unref(smu_buf); + + return ret; +} diff --git a/drivers/gpu/drm/amd/amdgpu/cz_smumgr.h b/drivers/gpu/drm/amd/amdgpu/cz_smumgr.h new file mode 100644 index 000000000000..924d355b4e2c --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/cz_smumgr.h @@ -0,0 +1,94 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __CZ_SMC_H__ +#define __CZ_SMC_H__ + +#define MAX_NUM_FIRMWARE 8 +#define MAX_NUM_SCRATCH 11 +#define CZ_SCRATCH_SIZE_NONGFX_CLOCKGATING 1024 +#define CZ_SCRATCH_SIZE_NONGFX_GOLDENSETTING 2048 +#define CZ_SCRATCH_SIZE_SDMA_METADATA 1024 +#define CZ_SCRATCH_SIZE_IH ((2*256+1)*4) + +enum cz_scratch_entry { + CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0 = 0, + CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, + CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, + CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, + CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, + CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, + CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, + CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG, + CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, + CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH, + CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM, + CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM, + CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM, + CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM, + CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING, + CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT, + CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING, + CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS, + CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT, + CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START, + CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS, + CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE +}; + +struct cz_buffer_entry { + uint32_t data_size; + uint32_t mc_addr_low; + uint32_t mc_addr_high; + void *kaddr; + enum cz_scratch_entry firmware_ID; +}; + +struct cz_register_index_data_pair { + uint32_t offset; + uint32_t value; +}; + +struct cz_ih_meta_data { + uint32_t command; + struct cz_register_index_data_pair register_index_value_pair[1]; +}; + +struct cz_smu_private_data { + uint8_t driver_buffer_length; + uint8_t scratch_buffer_length; + uint16_t toc_entry_used_count; + uint16_t toc_entry_initialize_index; + uint16_t toc_entry_power_profiling_index; + uint16_t toc_entry_aram; + uint16_t toc_entry_ih_register_restore_task_index; + uint16_t toc_entry_clock_table; + uint16_t ih_register_restore_task_size; + uint16_t smu_buffer_used_bytes; + + struct cz_buffer_entry toc_buffer; + struct cz_buffer_entry smu_buffer; + struct cz_buffer_entry driver_buffer[MAX_NUM_FIRMWARE]; + struct cz_buffer_entry scratch_buffer[MAX_NUM_SCRATCH]; +}; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c new file mode 100644 index 000000000000..d412291ed70e --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -0,0 +1,3871 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "drmP.h" +#include "amdgpu.h" +#include "amdgpu_pm.h" +#include "amdgpu_i2c.h" +#include "vid.h" +#include "atom.h" +#include "amdgpu_atombios.h" +#include "atombios_crtc.h" +#include "atombios_encoders.h" +#include "amdgpu_pll.h" +#include "amdgpu_connectors.h" + +#include "dce/dce_10_0_d.h" +#include "dce/dce_10_0_sh_mask.h" +#include "dce/dce_10_0_enum.h" +#include "oss/oss_3_0_d.h" +#include "oss/oss_3_0_sh_mask.h" +#include "gmc/gmc_8_1_d.h" +#include "gmc/gmc_8_1_sh_mask.h" + +static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev); +static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev); + +static const u32 crtc_offsets[] = +{ + CRTC0_REGISTER_OFFSET, + CRTC1_REGISTER_OFFSET, + CRTC2_REGISTER_OFFSET, + CRTC3_REGISTER_OFFSET, + CRTC4_REGISTER_OFFSET, + CRTC5_REGISTER_OFFSET, + CRTC6_REGISTER_OFFSET +}; + +static const u32 hpd_offsets[] = +{ + HPD0_REGISTER_OFFSET, + HPD1_REGISTER_OFFSET, + HPD2_REGISTER_OFFSET, + HPD3_REGISTER_OFFSET, + HPD4_REGISTER_OFFSET, + HPD5_REGISTER_OFFSET +}; + +static const uint32_t dig_offsets[] = { + DIG0_REGISTER_OFFSET, + DIG1_REGISTER_OFFSET, + DIG2_REGISTER_OFFSET, + DIG3_REGISTER_OFFSET, + DIG4_REGISTER_OFFSET, + DIG5_REGISTER_OFFSET, + DIG6_REGISTER_OFFSET +}; + +static const struct { + uint32_t reg; + uint32_t vblank; + uint32_t vline; + uint32_t hpd; + +} interrupt_status_offsets[] = { { + .reg = mmDISP_INTERRUPT_STATUS, + .vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK, + .vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK, + .hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK +}, { + .reg = mmDISP_INTERRUPT_STATUS_CONTINUE, + .vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK, + .vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK, + .hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK +}, { + .reg = mmDISP_INTERRUPT_STATUS_CONTINUE2, + .vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK, + .vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK, + .hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK +}, { + .reg = mmDISP_INTERRUPT_STATUS_CONTINUE3, + .vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK, + .vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK, + .hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK +}, { + .reg = mmDISP_INTERRUPT_STATUS_CONTINUE4, + .vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK, + .vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK, + .hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK +}, { + .reg = mmDISP_INTERRUPT_STATUS_CONTINUE5, + .vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK, + .vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK, + .hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK +} }; + +static const u32 golden_settings_tonga_a11[] = +{ + mmDCI_CLK_CNTL, 0x00000080, 0x00000000, + mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070, + 
mmFBC_MISC, 0x1f311fff, 0x12300000, + mmHDMI_CONTROL, 0x31000111, 0x00000011, +}; + +static void dce_v10_0_init_golden_registers(struct amdgpu_device *adev) +{ + switch (adev->asic_type) { + case CHIP_TONGA: + amdgpu_program_register_sequence(adev, + golden_settings_tonga_a11, + (const u32)ARRAY_SIZE(golden_settings_tonga_a11)); + break; + default: + break; + } +} + +static u32 dce_v10_0_audio_endpt_rreg(struct amdgpu_device *adev, + u32 block_offset, u32 reg) +{ + unsigned long flags; + u32 r; + + spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags); + WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg); + r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset); + spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags); + + return r; +} + +static void dce_v10_0_audio_endpt_wreg(struct amdgpu_device *adev, + u32 block_offset, u32 reg, u32 v) +{ + unsigned long flags; + + spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags); + WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg); + WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v); + spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags); +} + +static bool dce_v10_0_is_in_vblank(struct amdgpu_device *adev, int crtc) +{ + if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) & + CRTC_V_BLANK_START_END__CRTC_V_BLANK_START_MASK) + return true; + else + return false; +} + +static bool dce_v10_0_is_counter_moving(struct amdgpu_device *adev, int crtc) +{ + u32 pos1, pos2; + + pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]); + pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]); + + if (pos1 != pos2) + return true; + else + return false; +} + +/** + * dce_v10_0_vblank_wait - vblank wait asic callback. + * + * @adev: amdgpu_device pointer + * @crtc: crtc to wait for vblank on + * + * Wait for vblank on the requested crtc (evergreen+). + */ +static void dce_v10_0_vblank_wait(struct amdgpu_device *adev, int crtc) +{ + unsigned i = 0; + + if (crtc >= adev->mode_info.num_crtc) + return; + + if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK)) + return; + + /* depending on when we hit vblank, we may be close to active; if so, + * wait for another frame. + */ + while (dce_v10_0_is_in_vblank(adev, crtc)) { + if (i++ % 100 == 0) { + if (!dce_v10_0_is_counter_moving(adev, crtc)) + break; + } + } + + while (!dce_v10_0_is_in_vblank(adev, crtc)) { + if (i++ % 100 == 0) { + if (!dce_v10_0_is_counter_moving(adev, crtc)) + break; + } + } +} + +static u32 dce_v10_0_vblank_get_counter(struct amdgpu_device *adev, int crtc) +{ + if (crtc >= adev->mode_info.num_crtc) + return 0; + else + return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]); +} + +/** + * dce_v10_0_page_flip - pageflip callback. + * + * @adev: amdgpu_device pointer + * @crtc_id: crtc to cleanup pageflip on + * @crtc_base: new address of the crtc (GPU MC address) + * + * Does the actual pageflip (evergreen+). + * During vblank we take the crtc lock and wait for the update_pending + * bit to go high, when it does, we release the lock, and allow the + * double buffered update to take place. + * Returns the current update pending status. 
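+ * Both the primary and secondary surface addresses are rewritten below,
+ * presumably so the new base is picked up whichever surface the controller
+ * is currently scanning out from.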
+ */ +static void dce_v10_0_page_flip(struct amdgpu_device *adev, + int crtc_id, u64 crtc_base) +{ + struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; + u32 tmp = RREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset); + int i; + + /* Lock the graphics update lock */ + tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1); + WREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset, tmp); + + /* update the scanout addresses */ + WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, + upper_32_bits(crtc_base)); + WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, + lower_32_bits(crtc_base)); + + WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, + upper_32_bits(crtc_base)); + WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, + lower_32_bits(crtc_base)); + + /* Wait for update_pending to go high. */ + for (i = 0; i < adev->usec_timeout; i++) { + if (RREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset) & + GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING_MASK) + break; + udelay(1); + } + DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n"); + + /* Unlock the lock, so double-buffering can take place inside vblank */ + tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0); + WREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset, tmp); +} + +static int dce_v10_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, + u32 *vbl, u32 *position) +{ + if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc)) + return -EINVAL; + + *vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]); + *position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]); + + return 0; +} + +/** + * dce_v10_0_hpd_sense - hpd sense callback. + * + * @adev: amdgpu_device pointer + * @hpd: hpd (hotplug detect) pin + * + * Checks if a digital monitor is connected (evergreen+). + * Returns true if connected, false if not connected. + */ +static bool dce_v10_0_hpd_sense(struct amdgpu_device *adev, + enum amdgpu_hpd_id hpd) +{ + int idx; + bool connected = false; + + switch (hpd) { + case AMDGPU_HPD_1: + idx = 0; + break; + case AMDGPU_HPD_2: + idx = 1; + break; + case AMDGPU_HPD_3: + idx = 2; + break; + case AMDGPU_HPD_4: + idx = 3; + break; + case AMDGPU_HPD_5: + idx = 4; + break; + case AMDGPU_HPD_6: + idx = 5; + break; + default: + return connected; + } + + if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[idx]) & + DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK) + connected = true; + + return connected; +} + +/** + * dce_v10_0_hpd_set_polarity - hpd set polarity callback. + * + * @adev: amdgpu_device pointer + * @hpd: hpd (hotplug detect) pin + * + * Set the polarity of the hpd pin (evergreen+). + */ +static void dce_v10_0_hpd_set_polarity(struct amdgpu_device *adev, + enum amdgpu_hpd_id hpd) +{ + u32 tmp; + bool connected = dce_v10_0_hpd_sense(adev, hpd); + int idx; + + switch (hpd) { + case AMDGPU_HPD_1: + idx = 0; + break; + case AMDGPU_HPD_2: + idx = 1; + break; + case AMDGPU_HPD_3: + idx = 2; + break; + case AMDGPU_HPD_4: + idx = 3; + break; + case AMDGPU_HPD_5: + idx = 4; + break; + case AMDGPU_HPD_6: + idx = 5; + break; + default: + return; + } + + tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]); + if (connected) + tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0); + else + tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1); + WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp); +} + +/** + * dce_v10_0_hpd_init - hpd setup callback. 
+ * + * @adev: amdgpu_device pointer + * + * Setup the hpd pins used by the card (evergreen+). + * Enable the pin, set the polarity, and enable the hpd interrupts. + */ +static void dce_v10_0_hpd_init(struct amdgpu_device *adev) +{ + struct drm_device *dev = adev->ddev; + struct drm_connector *connector; + u32 tmp; + int idx; + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); + + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || + connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { + /* don't try to enable hpd on eDP or LVDS avoid breaking the + * aux dp channel on imac and help (but not completely fix) + * https://bugzilla.redhat.com/show_bug.cgi?id=726143 + * also avoid interrupt storms during dpms. + */ + continue; + } + + switch (amdgpu_connector->hpd.hpd) { + case AMDGPU_HPD_1: + idx = 0; + break; + case AMDGPU_HPD_2: + idx = 1; + break; + case AMDGPU_HPD_3: + idx = 2; + break; + case AMDGPU_HPD_4: + idx = 3; + break; + case AMDGPU_HPD_5: + idx = 4; + break; + case AMDGPU_HPD_6: + idx = 5; + break; + default: + continue; + } + + tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]); + tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1); + WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp); + + tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx]); + tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL, + DC_HPD_CONNECT_INT_DELAY, + AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS); + tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL, + DC_HPD_DISCONNECT_INT_DELAY, + AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS); + WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx], tmp); + + dce_v10_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd); + amdgpu_irq_get(adev, &adev->hpd_irq, + amdgpu_connector->hpd.hpd); + } +} + +/** + * dce_v10_0_hpd_fini - hpd tear down callback. + * + * @adev: amdgpu_device pointer + * + * Tear down the hpd pins used by the card (evergreen+). + * Disable the hpd interrupts. 
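+ * Mirrors dce_v10_0_hpd_init: DC_HPD_EN is cleared and the interrupt
+ * reference taken there is dropped for each connector.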
+ */ +static void dce_v10_0_hpd_fini(struct amdgpu_device *adev) +{ + struct drm_device *dev = adev->ddev; + struct drm_connector *connector; + u32 tmp; + int idx; + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); + + switch (amdgpu_connector->hpd.hpd) { + case AMDGPU_HPD_1: + idx = 0; + break; + case AMDGPU_HPD_2: + idx = 1; + break; + case AMDGPU_HPD_3: + idx = 2; + break; + case AMDGPU_HPD_4: + idx = 3; + break; + case AMDGPU_HPD_5: + idx = 4; + break; + case AMDGPU_HPD_6: + idx = 5; + break; + default: + continue; + } + + tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]); + tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0); + WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp); + + amdgpu_irq_put(adev, &adev->hpd_irq, + amdgpu_connector->hpd.hpd); + } +} + +static u32 dce_v10_0_hpd_get_gpio_reg(struct amdgpu_device *adev) +{ + return mmDC_GPIO_HPD_A; +} + +static bool dce_v10_0_is_display_hung(struct amdgpu_device *adev) +{ + u32 crtc_hung = 0; + u32 crtc_status[6]; + u32 i, j, tmp; + + for (i = 0; i < adev->mode_info.num_crtc; i++) { + tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]); + if (REG_GET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN)) { + crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]); + crtc_hung |= (1 << i); + } + } + + for (j = 0; j < 10; j++) { + for (i = 0; i < adev->mode_info.num_crtc; i++) { + if (crtc_hung & (1 << i)) { + tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]); + if (tmp != crtc_status[i]) + crtc_hung &= ~(1 << i); + } + } + if (crtc_hung == 0) + return false; + udelay(100); + } + + return true; +} + +static void dce_v10_0_stop_mc_access(struct amdgpu_device *adev, + struct amdgpu_mode_mc_save *save) +{ + u32 crtc_enabled, tmp; + int i; + + save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL); + save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL); + + /* disable VGA render */ + tmp = RREG32(mmVGA_RENDER_CONTROL); + tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); + WREG32(mmVGA_RENDER_CONTROL, tmp); + + /* blank the display controllers */ + for (i = 0; i < adev->mode_info.num_crtc; i++) { + crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]), + CRTC_CONTROL, CRTC_MASTER_EN); + if (crtc_enabled) { +#if 0 + u32 frame_count; + int j; + + save->crtc_enabled[i] = true; + tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); + if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) { + amdgpu_display_vblank_wait(adev, i); + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); + tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1); + WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); + } + /* wait for the next frame */ + frame_count = amdgpu_display_vblank_get_counter(adev, i); + for (j = 0; j < adev->usec_timeout; j++) { + if (amdgpu_display_vblank_get_counter(adev, i) != frame_count) + break; + udelay(1); + } + tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); + if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK) == 0) { + tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1); + WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp); + } + tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]); + if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK) == 0) { + tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 1); + WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp); + } +#else + /* XXX this is a hack to avoid 
strange behavior with EFI on certain systems */ + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); + tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]); + tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0); + WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp); + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); + save->crtc_enabled[i] = false; + /* ***** */ +#endif + } else { + save->crtc_enabled[i] = false; + } + } +} + +static void dce_v10_0_resume_mc_access(struct amdgpu_device *adev, + struct amdgpu_mode_mc_save *save) +{ + u32 tmp, frame_count; + int i, j; + + /* update crtc base addresses */ + for (i = 0; i < adev->mode_info.num_crtc; i++) { + WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], + upper_32_bits(adev->mc.vram_start)); + WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], + upper_32_bits(adev->mc.vram_start)); + WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i], + (u32)adev->mc.vram_start); + WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i], + (u32)adev->mc.vram_start); + + if (save->crtc_enabled[i]) { + tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]); + if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 3) { + tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 3); + WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp); + } + tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); + if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK)) { + tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0); + WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp); + } + tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]); + if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK)) { + tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 0); + WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp); + } + for (j = 0; j < adev->usec_timeout; j++) { + tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); + if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING) == 0) + break; + udelay(1); + } + tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); + tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0); + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); + WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); + /* wait for the next frame */ + frame_count = amdgpu_display_vblank_get_counter(adev, i); + for (j = 0; j < adev->usec_timeout; j++) { + if (amdgpu_display_vblank_get_counter(adev, i) != frame_count) + break; + udelay(1); + } + } + } + + WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start)); + WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start)); + + /* Unlock vga access */ + WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control); + mdelay(1); + WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control); +} + +static void dce_v10_0_set_vga_render_state(struct amdgpu_device *adev, + bool render) +{ + u32 tmp; + + /* Lockout access through VGA aperture*/ + tmp = RREG32(mmVGA_HDP_CONTROL); + if (render) + tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0); + else + tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1); + WREG32(mmVGA_HDP_CONTROL, tmp); + + /* disable VGA render */ + tmp = RREG32(mmVGA_RENDER_CONTROL); + if (render) + tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1); + else + tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); + WREG32(mmVGA_RENDER_CONTROL, tmp); +} + +static void dce_v10_0_program_fmt(struct drm_encoder *encoder) +{ + struct 
drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); + struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); + int bpc = 0; + u32 tmp = 0; + enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE; + + if (connector) { + struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); + bpc = amdgpu_connector_get_monitor_bpc(connector); + dither = amdgpu_connector->dither; + } + + /* LVDS/eDP FMT is set up by atom */ + if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT) + return; + + /* not needed for analog */ + if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) || + (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2)) + return; + + if (bpc == 0) + return; + + switch (bpc) { + case 6: + if (dither == AMDGPU_FMT_DITHER_ENABLE) { + /* XXX sort out optimal dither settings */ + tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1); + tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1); + tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1); + tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 0); + } else { + tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1); + tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 0); + } + break; + case 8: + if (dither == AMDGPU_FMT_DITHER_ENABLE) { + /* XXX sort out optimal dither settings */ + tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1); + tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1); + tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1); + tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1); + tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 1); + } else { + tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1); + tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 1); + } + break; + case 10: + if (dither == AMDGPU_FMT_DITHER_ENABLE) { + /* XXX sort out optimal dither settings */ + tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1); + tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1); + tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1); + tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1); + tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 2); + } else { + tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1); + tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 2); + } + break; + default: + /* not needed */ + break; + } + + WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp); +} + + +/* display watermark setup */ +/** + * dce_v10_0_line_buffer_adjust - Set up the line buffer + * + * @adev: amdgpu_device pointer + * @amdgpu_crtc: the selected display controller + * @mode: the current display mode on the selected display + * controller + * + * Setup up the line buffer allocation for + * the selected display controller (CIK). + * Returns the line buffer size in pixels. 
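+ * For example, per the thresholds below: a mode narrower than 1920 pixels
+ * uses memory config 1 and gets 1920 * 2 = 3840 pixels, while a 1920-2559
+ * pixel wide mode uses config 2 and gets 2560 * 2 = 5120 pixels.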
+ */ +static u32 dce_v10_0_line_buffer_adjust(struct amdgpu_device *adev, + struct amdgpu_crtc *amdgpu_crtc, + struct drm_display_mode *mode) +{ + u32 tmp, buffer_alloc, i, mem_cfg; + u32 pipe_offset = amdgpu_crtc->crtc_id; + /* + * Line Buffer Setup + * There are 6 line buffers, one for each display controllers. + * There are 3 partitions per LB. Select the number of partitions + * to enable based on the display width. For display widths larger + * than 4096, you need use to use 2 display controllers and combine + * them using the stereo blender. + */ + if (amdgpu_crtc->base.enabled && mode) { + if (mode->crtc_hdisplay < 1920) { + mem_cfg = 1; + buffer_alloc = 2; + } else if (mode->crtc_hdisplay < 2560) { + mem_cfg = 2; + buffer_alloc = 2; + } else if (mode->crtc_hdisplay < 4096) { + mem_cfg = 0; + buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4; + } else { + DRM_DEBUG_KMS("Mode too big for LB!\n"); + mem_cfg = 0; + buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4; + } + } else { + mem_cfg = 1; + buffer_alloc = 0; + } + + tmp = RREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset); + tmp = REG_SET_FIELD(tmp, LB_MEMORY_CTRL, LB_MEMORY_CONFIG, mem_cfg); + WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset, tmp); + + tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset); + tmp = REG_SET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATED, buffer_alloc); + WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset, tmp); + + for (i = 0; i < adev->usec_timeout; i++) { + tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset); + if (REG_GET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATION_COMPLETED)) + break; + udelay(1); + } + + if (amdgpu_crtc->base.enabled && mode) { + switch (mem_cfg) { + case 0: + default: + return 4096 * 2; + case 1: + return 1920 * 2; + case 2: + return 2560 * 2; + } + } + + /* controller not enabled, so no lb used */ + return 0; +} + +/** + * cik_get_number_of_dram_channels - get the number of dram channels + * + * @adev: amdgpu_device pointer + * + * Look up the number of video ram channels (CIK). + * Used for display watermark bandwidth calculations + * Returns the number of dram channels + */ +static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev) +{ + u32 tmp = RREG32(mmMC_SHARED_CHMAP); + + switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) { + case 0: + default: + return 1; + case 1: + return 2; + case 2: + return 4; + case 3: + return 8; + case 4: + return 3; + case 5: + return 6; + case 6: + return 10; + case 7: + return 12; + case 8: + return 16; + } +} + +struct dce10_wm_params { + u32 dram_channels; /* number of dram channels */ + u32 yclk; /* bandwidth per dram data pin in kHz */ + u32 sclk; /* engine clock in kHz */ + u32 disp_clk; /* display clock in kHz */ + u32 src_width; /* viewport width */ + u32 active_time; /* active display time in ns */ + u32 blank_time; /* blank time in ns */ + bool interlaced; /* mode is interlaced */ + fixed20_12 vsc; /* vertical scale ratio */ + u32 num_heads; /* number of active crtcs */ + u32 bytes_per_pixel; /* bytes per pixel display + overlay */ + u32 lb_size; /* line buffer allocated to pipe */ + u32 vtaps; /* vertical scaler taps */ +}; + +/** + * dce_v10_0_dram_bandwidth - get the dram bandwidth + * + * @wm: watermark calculation data + * + * Calculate the raw dram bandwidth (CIK). 
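+ * Roughly (yclk / 1000) * dram_channels * 4 * 0.7 MBytes/s, matching the
+ * fixed point math below.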
+ * Used for display watermark bandwidth calculations + * Returns the dram bandwidth in MBytes/s + */ +static u32 dce_v10_0_dram_bandwidth(struct dce10_wm_params *wm) +{ + /* Calculate raw DRAM Bandwidth */ + fixed20_12 dram_efficiency; /* 0.7 */ + fixed20_12 yclk, dram_channels, bandwidth; + fixed20_12 a; + + a.full = dfixed_const(1000); + yclk.full = dfixed_const(wm->yclk); + yclk.full = dfixed_div(yclk, a); + dram_channels.full = dfixed_const(wm->dram_channels * 4); + a.full = dfixed_const(10); + dram_efficiency.full = dfixed_const(7); + dram_efficiency.full = dfixed_div(dram_efficiency, a); + bandwidth.full = dfixed_mul(dram_channels, yclk); + bandwidth.full = dfixed_mul(bandwidth, dram_efficiency); + + return dfixed_trunc(bandwidth); +} + +/** + * dce_v10_0_dram_bandwidth_for_display - get the dram bandwidth for display + * + * @wm: watermark calculation data + * + * Calculate the dram bandwidth used for display (CIK). + * Used for display watermark bandwidth calculations + * Returns the dram bandwidth for display in MBytes/s + */ +static u32 dce_v10_0_dram_bandwidth_for_display(struct dce10_wm_params *wm) +{ + /* Calculate DRAM Bandwidth and the part allocated to display. */ + fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */ + fixed20_12 yclk, dram_channels, bandwidth; + fixed20_12 a; + + a.full = dfixed_const(1000); + yclk.full = dfixed_const(wm->yclk); + yclk.full = dfixed_div(yclk, a); + dram_channels.full = dfixed_const(wm->dram_channels * 4); + a.full = dfixed_const(10); + disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */ + disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a); + bandwidth.full = dfixed_mul(dram_channels, yclk); + bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation); + + return dfixed_trunc(bandwidth); +} + +/** + * dce_v10_0_data_return_bandwidth - get the data return bandwidth + * + * @wm: watermark calculation data + * + * Calculate the data return bandwidth used for display (CIK). + * Used for display watermark bandwidth calculations + * Returns the data return bandwidth in MBytes/s + */ +static u32 dce_v10_0_data_return_bandwidth(struct dce10_wm_params *wm) +{ + /* Calculate the display Data return Bandwidth */ + fixed20_12 return_efficiency; /* 0.8 */ + fixed20_12 sclk, bandwidth; + fixed20_12 a; + + a.full = dfixed_const(1000); + sclk.full = dfixed_const(wm->sclk); + sclk.full = dfixed_div(sclk, a); + a.full = dfixed_const(10); + return_efficiency.full = dfixed_const(8); + return_efficiency.full = dfixed_div(return_efficiency, a); + a.full = dfixed_const(32); + bandwidth.full = dfixed_mul(a, sclk); + bandwidth.full = dfixed_mul(bandwidth, return_efficiency); + + return dfixed_trunc(bandwidth); +} + +/** + * dce_v10_0_dmif_request_bandwidth - get the dmif bandwidth + * + * @wm: watermark calculation data + * + * Calculate the dmif bandwidth used for display (CIK). 
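+ * Roughly 32 * (disp_clk / 1000) * 0.8 MBytes/s, matching the fixed point
+ * math below.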
+ * Used for display watermark bandwidth calculations + * Returns the dmif bandwidth in MBytes/s + */ +static u32 dce_v10_0_dmif_request_bandwidth(struct dce10_wm_params *wm) +{ + /* Calculate the DMIF Request Bandwidth */ + fixed20_12 disp_clk_request_efficiency; /* 0.8 */ + fixed20_12 disp_clk, bandwidth; + fixed20_12 a, b; + + a.full = dfixed_const(1000); + disp_clk.full = dfixed_const(wm->disp_clk); + disp_clk.full = dfixed_div(disp_clk, a); + a.full = dfixed_const(32); + b.full = dfixed_mul(a, disp_clk); + + a.full = dfixed_const(10); + disp_clk_request_efficiency.full = dfixed_const(8); + disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a); + + bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency); + + return dfixed_trunc(bandwidth); +} + +/** + * dce_v10_0_available_bandwidth - get the min available bandwidth + * + * @wm: watermark calculation data + * + * Calculate the min available bandwidth used for display (CIK). + * Used for display watermark bandwidth calculations + * Returns the min available bandwidth in MBytes/s + */ +static u32 dce_v10_0_available_bandwidth(struct dce10_wm_params *wm) +{ + /* Calculate the Available bandwidth. Display can use this temporarily but not in average. */ + u32 dram_bandwidth = dce_v10_0_dram_bandwidth(wm); + u32 data_return_bandwidth = dce_v10_0_data_return_bandwidth(wm); + u32 dmif_req_bandwidth = dce_v10_0_dmif_request_bandwidth(wm); + + return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth)); +} + +/** + * dce_v10_0_average_bandwidth - get the average available bandwidth + * + * @wm: watermark calculation data + * + * Calculate the average available bandwidth used for display (CIK). + * Used for display watermark bandwidth calculations + * Returns the average available bandwidth in MBytes/s + */ +static u32 dce_v10_0_average_bandwidth(struct dce10_wm_params *wm) +{ + /* Calculate the display mode Average Bandwidth + * DisplayMode should contain the source and destination dimensions, + * timing, etc. + */ + fixed20_12 bpp; + fixed20_12 line_time; + fixed20_12 src_width; + fixed20_12 bandwidth; + fixed20_12 a; + + a.full = dfixed_const(1000); + line_time.full = dfixed_const(wm->active_time + wm->blank_time); + line_time.full = dfixed_div(line_time, a); + bpp.full = dfixed_const(wm->bytes_per_pixel); + src_width.full = dfixed_const(wm->src_width); + bandwidth.full = dfixed_mul(src_width, bpp); + bandwidth.full = dfixed_mul(bandwidth, wm->vsc); + bandwidth.full = dfixed_div(bandwidth, line_time); + + return dfixed_trunc(bandwidth); +} + +/** + * dce_v10_0_latency_watermark - get the latency watermark + * + * @wm: watermark calculation data + * + * Calculate the latency watermark (CIK). + * Used for display watermark bandwidth calculations + * Returns the latency watermark in ns + */ +static u32 dce_v10_0_latency_watermark(struct dce10_wm_params *wm) +{ + /* First calculate the latency in ns */ + u32 mc_latency = 2000; /* 2000 ns. 
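(fixed estimate, feeds the total latency sum below)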
*/ + u32 available_bandwidth = dce_v10_0_available_bandwidth(wm); + u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth; + u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth; + u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */ + u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) + + (wm->num_heads * cursor_line_pair_return_time); + u32 latency = mc_latency + other_heads_data_return_time + dc_latency; + u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time; + u32 tmp, dmif_size = 12288; + fixed20_12 a, b, c; + + if (wm->num_heads == 0) + return 0; + + a.full = dfixed_const(2); + b.full = dfixed_const(1); + if ((wm->vsc.full > a.full) || + ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) || + (wm->vtaps >= 5) || + ((wm->vsc.full >= a.full) && wm->interlaced)) + max_src_lines_per_dst_line = 4; + else + max_src_lines_per_dst_line = 2; + + a.full = dfixed_const(available_bandwidth); + b.full = dfixed_const(wm->num_heads); + a.full = dfixed_div(a, b); + + b.full = dfixed_const(mc_latency + 512); + c.full = dfixed_const(wm->disp_clk); + b.full = dfixed_div(b, c); + + c.full = dfixed_const(dmif_size); + b.full = dfixed_div(c, b); + + tmp = min(dfixed_trunc(a), dfixed_trunc(b)); + + b.full = dfixed_const(1000); + c.full = dfixed_const(wm->disp_clk); + b.full = dfixed_div(c, b); + c.full = dfixed_const(wm->bytes_per_pixel); + b.full = dfixed_mul(b, c); + + lb_fill_bw = min(tmp, dfixed_trunc(b)); + + a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel); + b.full = dfixed_const(1000); + c.full = dfixed_const(lb_fill_bw); + b.full = dfixed_div(c, b); + a.full = dfixed_div(a, b); + line_fill_time = dfixed_trunc(a); + + if (line_fill_time < wm->active_time) + return latency; + else + return latency + (line_fill_time - wm->active_time); + +} + +/** + * dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display - check + * average and available dram bandwidth + * + * @wm: watermark calculation data + * + * Check if the display average bandwidth fits in the display + * dram bandwidth (CIK). + * Used for display watermark bandwidth calculations + * Returns true if the display fits, false if not. + */ +static bool dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce10_wm_params *wm) +{ + if (dce_v10_0_average_bandwidth(wm) <= + (dce_v10_0_dram_bandwidth_for_display(wm) / wm->num_heads)) + return true; + else + return false; +} + +/** + * dce_v10_0_average_bandwidth_vs_available_bandwidth - check + * average and available bandwidth + * + * @wm: watermark calculation data + * + * Check if the display average bandwidth fits in the display + * available bandwidth (CIK). + * Used for display watermark bandwidth calculations + * Returns true if the display fits, false if not. + */ +static bool dce_v10_0_average_bandwidth_vs_available_bandwidth(struct dce10_wm_params *wm) +{ + if (dce_v10_0_average_bandwidth(wm) <= + (dce_v10_0_available_bandwidth(wm) / wm->num_heads)) + return true; + else + return false; +} + +/** + * dce_v10_0_check_latency_hiding - check latency hiding + * + * @wm: watermark calculation data + * + * Check latency hiding (CIK). + * Used for display watermark bandwidth calculations + * Returns true if the display fits, false if not. 
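+ * i.e. whether the latency watermark can be covered by the line(s) held in
+ * the line buffer plus the blank time (see the math below).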
+ */ +static bool dce_v10_0_check_latency_hiding(struct dce10_wm_params *wm) +{ + u32 lb_partitions = wm->lb_size / wm->src_width; + u32 line_time = wm->active_time + wm->blank_time; + u32 latency_tolerant_lines; + u32 latency_hiding; + fixed20_12 a; + + a.full = dfixed_const(1); + if (wm->vsc.full > a.full) + latency_tolerant_lines = 1; + else { + if (lb_partitions <= (wm->vtaps + 1)) + latency_tolerant_lines = 1; + else + latency_tolerant_lines = 2; + } + + latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time); + + if (dce_v10_0_latency_watermark(wm) <= latency_hiding) + return true; + else + return false; +} + +/** + * dce_v10_0_program_watermarks - program display watermarks + * + * @adev: amdgpu_device pointer + * @amdgpu_crtc: the selected display controller + * @lb_size: line buffer size + * @num_heads: number of display controllers in use + * + * Calculate and program the display watermarks for the + * selected display controller (CIK). + */ +static void dce_v10_0_program_watermarks(struct amdgpu_device *adev, + struct amdgpu_crtc *amdgpu_crtc, + u32 lb_size, u32 num_heads) +{ + struct drm_display_mode *mode = &amdgpu_crtc->base.mode; + struct dce10_wm_params wm_low, wm_high; + u32 pixel_period; + u32 line_time = 0; + u32 latency_watermark_a = 0, latency_watermark_b = 0; + u32 tmp, wm_mask; + + if (amdgpu_crtc->base.enabled && num_heads && mode) { + pixel_period = 1000000 / (u32)mode->clock; + line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535); + + /* watermark for high clocks */ + if (adev->pm.dpm_enabled) { + wm_high.yclk = + amdgpu_dpm_get_mclk(adev, false) * 10; + wm_high.sclk = + amdgpu_dpm_get_sclk(adev, false) * 10; + } else { + wm_high.yclk = adev->pm.current_mclk * 10; + wm_high.sclk = adev->pm.current_sclk * 10; + } + + wm_high.disp_clk = mode->clock; + wm_high.src_width = mode->crtc_hdisplay; + wm_high.active_time = mode->crtc_hdisplay * pixel_period; + wm_high.blank_time = line_time - wm_high.active_time; + wm_high.interlaced = false; + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + wm_high.interlaced = true; + wm_high.vsc = amdgpu_crtc->vsc; + wm_high.vtaps = 1; + if (amdgpu_crtc->rmx_type != RMX_OFF) + wm_high.vtaps = 2; + wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */ + wm_high.lb_size = lb_size; + wm_high.dram_channels = cik_get_number_of_dram_channels(adev); + wm_high.num_heads = num_heads; + + /* set for high clocks */ + latency_watermark_a = min(dce_v10_0_latency_watermark(&wm_high), (u32)65535); + + /* possibly force display priority to high */ + /* should really do this at mode validation time... 
*/ + if (!dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) || + !dce_v10_0_average_bandwidth_vs_available_bandwidth(&wm_high) || + !dce_v10_0_check_latency_hiding(&wm_high) || + (adev->mode_info.disp_priority == 2)) { + DRM_DEBUG_KMS("force priority to high\n"); + } + + /* watermark for low clocks */ + if (adev->pm.dpm_enabled) { + wm_low.yclk = + amdgpu_dpm_get_mclk(adev, true) * 10; + wm_low.sclk = + amdgpu_dpm_get_sclk(adev, true) * 10; + } else { + wm_low.yclk = adev->pm.current_mclk * 10; + wm_low.sclk = adev->pm.current_sclk * 10; + } + + wm_low.disp_clk = mode->clock; + wm_low.src_width = mode->crtc_hdisplay; + wm_low.active_time = mode->crtc_hdisplay * pixel_period; + wm_low.blank_time = line_time - wm_low.active_time; + wm_low.interlaced = false; + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + wm_low.interlaced = true; + wm_low.vsc = amdgpu_crtc->vsc; + wm_low.vtaps = 1; + if (amdgpu_crtc->rmx_type != RMX_OFF) + wm_low.vtaps = 2; + wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */ + wm_low.lb_size = lb_size; + wm_low.dram_channels = cik_get_number_of_dram_channels(adev); + wm_low.num_heads = num_heads; + + /* set for low clocks */ + latency_watermark_b = min(dce_v10_0_latency_watermark(&wm_low), (u32)65535); + + /* possibly force display priority to high */ + /* should really do this at mode validation time... */ + if (!dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) || + !dce_v10_0_average_bandwidth_vs_available_bandwidth(&wm_low) || + !dce_v10_0_check_latency_hiding(&wm_low) || + (adev->mode_info.disp_priority == 2)) { + DRM_DEBUG_KMS("force priority to high\n"); + } + } + + /* select wm A */ + wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset); + tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 1); + WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp); + tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset); + tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a); + tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time); + WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp); + /* select wm B */ + tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 2); + WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp); + tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset); + tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a); + tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time); + WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp); + /* restore original selection */ + WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask); + + /* save values for DPM */ + amdgpu_crtc->line_time = line_time; + amdgpu_crtc->wm_high = latency_watermark_a; + amdgpu_crtc->wm_low = latency_watermark_b; +} + +/** + * dce_v10_0_bandwidth_update - program display watermarks + * + * @adev: amdgpu_device pointer + * + * Calculate and program the display watermarks and line + * buffer allocation (CIK). 
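+ * Counts the enabled crtcs first, then sizes the line buffer and programs
+ * the watermarks for each display controller.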
+ */ +static void dce_v10_0_bandwidth_update(struct amdgpu_device *adev) +{ + struct drm_display_mode *mode = NULL; + u32 num_heads = 0, lb_size; + int i; + + amdgpu_update_display_priority(adev); + + for (i = 0; i < adev->mode_info.num_crtc; i++) { + if (adev->mode_info.crtcs[i]->base.enabled) + num_heads++; + } + for (i = 0; i < adev->mode_info.num_crtc; i++) { + mode = &adev->mode_info.crtcs[i]->base.mode; + lb_size = dce_v10_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode); + dce_v10_0_program_watermarks(adev, adev->mode_info.crtcs[i], + lb_size, num_heads); + } +} + +static void dce_v10_0_audio_get_connected_pins(struct amdgpu_device *adev) +{ + int i; + u32 offset, tmp; + + for (i = 0; i < adev->mode_info.audio.num_pins; i++) { + offset = adev->mode_info.audio.pin[i].offset; + tmp = RREG32_AUDIO_ENDPT(offset, + ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT); + if (((tmp & + AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >> + AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1) + adev->mode_info.audio.pin[i].connected = false; + else + adev->mode_info.audio.pin[i].connected = true; + } +} + +static struct amdgpu_audio_pin *dce_v10_0_audio_get_pin(struct amdgpu_device *adev) +{ + int i; + + dce_v10_0_audio_get_connected_pins(adev); + + for (i = 0; i < adev->mode_info.audio.num_pins; i++) { + if (adev->mode_info.audio.pin[i].connected) + return &adev->mode_info.audio.pin[i]; + } + DRM_ERROR("No connected audio pins found!\n"); + return NULL; +} + +static void dce_v10_0_afmt_audio_select_pin(struct drm_encoder *encoder) +{ + struct amdgpu_device *adev = encoder->dev->dev_private; + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; + u32 tmp; + + if (!dig || !dig->afmt || !dig->afmt->pin) + return; + + tmp = RREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset); + tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, dig->afmt->pin->id); + WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset, tmp); +} + +static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder, + struct drm_display_mode *mode) +{ + struct amdgpu_device *adev = encoder->dev->dev_private; + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; + struct drm_connector *connector; + struct amdgpu_connector *amdgpu_connector = NULL; + u32 tmp; + int interlace = 0; + + if (!dig || !dig->afmt || !dig->afmt->pin) + return; + + list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + if (connector->encoder == encoder) { + amdgpu_connector = to_amdgpu_connector(connector); + break; + } + } + + if (!amdgpu_connector) { + DRM_ERROR("Couldn't find encoder's connector\n"); + return; + } + + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + interlace = 1; + if (connector->latency_present[interlace]) { + tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, + VIDEO_LIPSYNC, connector->video_latency[interlace]); + tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, + AUDIO_LIPSYNC, connector->audio_latency[interlace]); + } else { + tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, + VIDEO_LIPSYNC, 0); + tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, + AUDIO_LIPSYNC, 0); + } + WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, + 
ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp); +} + +static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder) +{ + struct amdgpu_device *adev = encoder->dev->dev_private; + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; + struct drm_connector *connector; + struct amdgpu_connector *amdgpu_connector = NULL; + u32 tmp; + u8 *sadb = NULL; + int sad_count; + + if (!dig || !dig->afmt || !dig->afmt->pin) + return; + + list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + if (connector->encoder == encoder) { + amdgpu_connector = to_amdgpu_connector(connector); + break; + } + } + + if (!amdgpu_connector) { + DRM_ERROR("Couldn't find encoder's connector\n"); + return; + } + + sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb); + if (sad_count < 0) { + DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); + sad_count = 0; + } + + /* program the speaker allocation */ + tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset, + ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER); + tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, + DP_CONNECTION, 0); + /* set HDMI mode */ + tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, + HDMI_CONNECTION, 1); + if (sad_count) + tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, + SPEAKER_ALLOCATION, sadb[0]); + else + tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, + SPEAKER_ALLOCATION, 5); /* stereo */ + WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, + ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp); + + kfree(sadb); +} + +static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder) +{ + struct amdgpu_device *adev = encoder->dev->dev_private; + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; + struct drm_connector *connector; + struct amdgpu_connector *amdgpu_connector = NULL; + struct cea_sad *sads; + int i, sad_count; + + static const u16 eld_reg_to_type[][2] = { + { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM }, + { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 }, + { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 }, + { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 }, + { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 }, + { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC }, + { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS }, + { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC }, + { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 }, + { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD }, + { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP }, + { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO }, + }; + + if (!dig || !dig->afmt || !dig->afmt->pin) + return; + + list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + if (connector->encoder == encoder) { + amdgpu_connector = to_amdgpu_connector(connector); + break; + } + } + + if (!amdgpu_connector) { + DRM_ERROR("Couldn't find encoder's connector\n"); + return; 
+ } + + sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads); + if (sad_count <= 0) { + DRM_ERROR("Couldn't read SADs: %d\n", sad_count); + return; + } + BUG_ON(!sads); + + for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) { + u32 tmp = 0; + u8 stereo_freqs = 0; + int max_channels = -1; + int j; + + for (j = 0; j < sad_count; j++) { + struct cea_sad *sad = &sads[j]; + + if (sad->format == eld_reg_to_type[i][1]) { + if (sad->channels > max_channels) { + tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, + MAX_CHANNELS, sad->channels); + tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, + DESCRIPTOR_BYTE_2, sad->byte2); + tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, + SUPPORTED_FREQUENCIES, sad->freq); + max_channels = sad->channels; + } + + if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM) + stereo_freqs |= sad->freq; + else + break; + } + } + + tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, + SUPPORTED_FREQUENCIES_STEREO, stereo_freqs); + WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp); + } + + kfree(sads); +} + +static void dce_v10_0_audio_enable(struct amdgpu_device *adev, + struct amdgpu_audio_pin *pin, + bool enable) +{ + if (!pin) + return; + + WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, + enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0); +} + +static const u32 pin_offsets[] = +{ + AUD0_REGISTER_OFFSET, + AUD1_REGISTER_OFFSET, + AUD2_REGISTER_OFFSET, + AUD3_REGISTER_OFFSET, + AUD4_REGISTER_OFFSET, + AUD5_REGISTER_OFFSET, + AUD6_REGISTER_OFFSET, +}; + +static int dce_v10_0_audio_init(struct amdgpu_device *adev) +{ + int i; + + if (!amdgpu_audio) + return 0; + + adev->mode_info.audio.enabled = true; + + adev->mode_info.audio.num_pins = 7; + + for (i = 0; i < adev->mode_info.audio.num_pins; i++) { + adev->mode_info.audio.pin[i].channels = -1; + adev->mode_info.audio.pin[i].rate = -1; + adev->mode_info.audio.pin[i].bits_per_sample = -1; + adev->mode_info.audio.pin[i].status_bits = 0; + adev->mode_info.audio.pin[i].category_code = 0; + adev->mode_info.audio.pin[i].connected = false; + adev->mode_info.audio.pin[i].offset = pin_offsets[i]; + adev->mode_info.audio.pin[i].id = i; + /* disable audio. 
it will be set up later */ + /* XXX remove once we switch to ip funcs */ + dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); + } + + return 0; +} + +static void dce_v10_0_audio_fini(struct amdgpu_device *adev) +{ + int i; + + if (!adev->mode_info.audio.enabled) + return; + + for (i = 0; i < adev->mode_info.audio.num_pins; i++) + dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); + + adev->mode_info.audio.enabled = false; +} + +/* + * update the N and CTS parameters for a given pixel clock rate + */ +static void dce_v10_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock) +{ + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock); + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; + u32 tmp; + + tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset); + tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz); + WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp); + tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset); + tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz); + WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp); + + tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset); + tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz); + WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp); + tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset); + tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz); + WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp); + + tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset); + tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz); + WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp); + tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset); + tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz); + WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp); + +} + +/* + * build a HDMI Video Info Frame + */ +static void dce_v10_0_afmt_update_avi_infoframe(struct drm_encoder *encoder, + void *buffer, size_t size) +{ + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; + uint8_t *frame = buffer + 3; + uint8_t *header = buffer; + + WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset, + frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24)); + WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset, + frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24)); + WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset, + frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24)); + WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset, + frame[0xC] | (frame[0xD] << 8) | (header[1] << 24)); +} + +static void dce_v10_0_audio_set_dto(struct drm_encoder *encoder, u32 clock) +{ + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); + u32 dto_phase = 24 * 1000; + u32 dto_modulo = clock; + u32 tmp; + + if (!dig || !dig->afmt) + return; + + /* XXX two dtos; generally use dto0 for hdmi */ + /* Express [24MHz / target pixel clock] as an exact rational + * number (coefficient of two integer 
numbers. DCCG_AUDIO_DTOx_PHASE + * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator + */ + tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE); + tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL, + amdgpu_crtc->crtc_id); + WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp); + WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase); + WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo); +} + +/* + * update the info frames with the data from the current display mode + */ +static void dce_v10_0_afmt_setmode(struct drm_encoder *encoder, + struct drm_display_mode *mode) +{ + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; + struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); + u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE]; + struct hdmi_avi_infoframe frame; + ssize_t err; + u32 tmp; + int bpc = 8; + + if (!dig || !dig->afmt) + return; + + /* Silent, r600_hdmi_enable will raise WARN for us */ + if (!dig->afmt->enabled) + return; + + /* hdmi deep color mode general control packets setup, if bpc > 8 */ + if (encoder->crtc) { + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); + bpc = amdgpu_crtc->bpc; + } + + /* disable audio prior to setting up hw */ + dig->afmt->pin = dce_v10_0_audio_get_pin(adev); + dce_v10_0_audio_enable(adev, dig->afmt->pin, false); + + dce_v10_0_audio_set_dto(encoder, mode->clock); + + tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset); + tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1); + WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp); /* send null packets when required */ + + WREG32(mmAFMT_AUDIO_CRC_CONTROL + dig->afmt->offset, 0x1000); + + tmp = RREG32(mmHDMI_CONTROL + dig->afmt->offset); + switch (bpc) { + case 0: + case 6: + case 8: + case 16: + default: + tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 0); + tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0); + DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n", + connector->name, bpc); + break; + case 10: + tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1); + tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 1); + DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n", + connector->name); + break; + case 12: + tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1); + tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 2); + DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n", + connector->name); + break; + } + WREG32(mmHDMI_CONTROL + dig->afmt->offset, tmp); + + tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset); + tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1); /* send null packets when required */ + tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1); /* send general control packets */ + tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1); /* send general control packets every frame */ + WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp); + + tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset); + /* enable audio info frames (frames won't be set until audio is enabled) */ + tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1); + /* required for audio info values to be updated */ + tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1); + 
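/*
 * Editor's illustrative sketch (not part of this patch): nearly every
 * register update in this file follows the same read-modify-write
 * pattern -- RREG32() the register, rewrite one bitfield with
 * REG_SET_FIELD(), then WREG32() the result back, as in the infoframe
 * control sequence at this point.  The tiny user-space demo below shows
 * the equivalent mask/shift arithmetic with made-up field definitions;
 * it is not the real REG_SET_FIELD macro.
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_FIELD__SHIFT 4
#define DEMO_FIELD_MASK   (0x3u << DEMO_FIELD__SHIFT)

static uint32_t demo_set_field(uint32_t reg, uint32_t mask,
                               uint32_t shift, uint32_t val)
{
        /* clear the field, then insert the new value within the mask */
        return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
        uint32_t tmp = 0xdeadbeef;      /* pretend RREG32() result */

        tmp = demo_set_field(tmp, DEMO_FIELD_MASK, DEMO_FIELD__SHIFT, 2);
        printf("register after field update: 0x%08x\n", tmp);
        /* a driver would now WREG32() tmp back to the same offset */
        return 0;
}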
WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp); + + tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset); + /* required for audio info values to be updated */ + tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1); + WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp); + + tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset); + /* anything other than 0 */ + tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, 2); + WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp); + + WREG32(mmHDMI_GC + dig->afmt->offset, 0); /* unset HDMI_GC_AVMUTE */ + + tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset); + /* set the default audio delay */ + tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1); + /* should be suffient for all audio modes and small enough for all hblanks */ + tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3); + WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp); + + tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset); + /* allow 60958 channel status fields to be updated */ + tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1); + WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp); + + tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset); + if (bpc > 8) + /* clear SW CTS value */ + tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 0); + else + /* select SW CTS value */ + tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 1); + /* allow hw to sent ACR packets when required */ + tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1); + WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp); + + dce_v10_0_afmt_update_ACR(encoder, mode->clock); + + tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset); + tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1); + WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp); + + tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset); + tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2); + WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp); + + tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset); + tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3); + tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4); + tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5); + tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6); + tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7); + tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8); + WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp); + + dce_v10_0_audio_write_speaker_allocation(encoder); + + WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset, + (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT)); + + dce_v10_0_afmt_audio_select_pin(encoder); + dce_v10_0_audio_write_sad_regs(encoder); + dce_v10_0_audio_write_latency_fields(encoder, mode); + + err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); + if (err < 0) { + DRM_ERROR("failed to setup AVI infoframe: %zd\n", err); + return; + } + + err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer)); + if (err < 0) { + DRM_ERROR("failed to pack AVI infoframe: %zd\n", err); + return; + } + + dce_v10_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer)); + + tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset); + 
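/*
 * Editor's illustrative sketch (not part of this patch): the packed AVI
 * infoframe bytes written just above are loaded into the 32-bit
 * AFMT_AVI_INFO0..3 registers four bytes at a time, least-significant
 * byte first, which is what the frame[n] | frame[n+1] << 8 | ...
 * expressions compute.  The helper below is a stand-alone restatement of
 * that byte packing; the names and sample bytes are invented for the demo.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t demo_pack_le32(const uint8_t *b)
{
        return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
               ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

int main(void)
{
        /* pretend these are the first four payload bytes of an AVI infoframe */
        const uint8_t payload[4] = { 0x10, 0x28, 0x00, 0x04 };

        printf("AFMT_AVI_INFO0-style word: 0x%08x\n", demo_pack_le32(payload));
        return 0;
}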
/* enable AVI info frames */ + tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1); + /* required for audio info values to be updated */ + tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1); + WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp); + + tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset); + tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2); + WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp); + + tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset); + /* send audio packets */ + tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1); + WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp); + + WREG32(mmAFMT_RAMP_CONTROL0 + dig->afmt->offset, 0x00FFFFFF); + WREG32(mmAFMT_RAMP_CONTROL1 + dig->afmt->offset, 0x007FFFFF); + WREG32(mmAFMT_RAMP_CONTROL2 + dig->afmt->offset, 0x00000001); + WREG32(mmAFMT_RAMP_CONTROL3 + dig->afmt->offset, 0x00000001); + + /* enable audio after to setting up hw */ + dce_v10_0_audio_enable(adev, dig->afmt->pin, true); +} + +static void dce_v10_0_afmt_enable(struct drm_encoder *encoder, bool enable) +{ + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; + + if (!dig || !dig->afmt) + return; + + /* Silent, r600_hdmi_enable will raise WARN for us */ + if (enable && dig->afmt->enabled) + return; + if (!enable && !dig->afmt->enabled) + return; + + if (!enable && dig->afmt->pin) { + dce_v10_0_audio_enable(adev, dig->afmt->pin, false); + dig->afmt->pin = NULL; + } + + dig->afmt->enabled = enable; + + DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n", + enable ? 
"En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id); +} + +static void dce_v10_0_afmt_init(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < adev->mode_info.num_dig; i++) + adev->mode_info.afmt[i] = NULL; + + /* DCE10 has audio blocks tied to DIG encoders */ + for (i = 0; i < adev->mode_info.num_dig; i++) { + adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL); + if (adev->mode_info.afmt[i]) { + adev->mode_info.afmt[i]->offset = dig_offsets[i]; + adev->mode_info.afmt[i]->id = i; + } + } +} + +static void dce_v10_0_afmt_fini(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < adev->mode_info.num_dig; i++) { + kfree(adev->mode_info.afmt[i]); + adev->mode_info.afmt[i] = NULL; + } +} + +static const u32 vga_control_regs[6] = +{ + mmD1VGA_CONTROL, + mmD2VGA_CONTROL, + mmD3VGA_CONTROL, + mmD4VGA_CONTROL, + mmD5VGA_CONTROL, + mmD6VGA_CONTROL, +}; + +static void dce_v10_0_vga_enable(struct drm_crtc *crtc, bool enable) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct amdgpu_device *adev = dev->dev_private; + u32 vga_control; + + vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1; + if (enable) + WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1); + else + WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control); +} + +static void dce_v10_0_grph_enable(struct drm_crtc *crtc, bool enable) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct amdgpu_device *adev = dev->dev_private; + + if (enable) + WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1); + else + WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0); +} + +static void dce_v10_0_tiling_fields(uint64_t tiling_flags, unsigned *bankw, + unsigned *bankh, unsigned *mtaspect, + unsigned *tile_split) +{ + *bankw = (tiling_flags >> AMDGPU_TILING_EG_BANKW_SHIFT) & AMDGPU_TILING_EG_BANKW_MASK; + *bankh = (tiling_flags >> AMDGPU_TILING_EG_BANKH_SHIFT) & AMDGPU_TILING_EG_BANKH_MASK; + *mtaspect = (tiling_flags >> AMDGPU_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & AMDGPU_TILING_EG_MACRO_TILE_ASPECT_MASK; + *tile_split = (tiling_flags >> AMDGPU_TILING_EG_TILE_SPLIT_SHIFT) & AMDGPU_TILING_EG_TILE_SPLIT_MASK; + switch (*bankw) { + default: + case 1: + *bankw = ADDR_SURF_BANK_WIDTH_1; + break; + case 2: + *bankw = ADDR_SURF_BANK_WIDTH_2; + break; + case 4: + *bankw = ADDR_SURF_BANK_WIDTH_4; + break; + case 8: + *bankw = ADDR_SURF_BANK_WIDTH_8; + break; + } + switch (*bankh) { + default: + case 1: + *bankh = ADDR_SURF_BANK_HEIGHT_1; + break; + case 2: + *bankh = ADDR_SURF_BANK_HEIGHT_2; + break; + case 4: + *bankh = ADDR_SURF_BANK_HEIGHT_4; + break; + case 8: + *bankh = ADDR_SURF_BANK_HEIGHT_8; + break; + } + switch (*mtaspect) { + default: + case 1: + *mtaspect = ADDR_SURF_MACRO_ASPECT_1; + break; + case 2: + *mtaspect = ADDR_SURF_MACRO_ASPECT_2; + break; + case 4: + *mtaspect = ADDR_SURF_MACRO_ASPECT_4; + break; + case 8: + *mtaspect = ADDR_SURF_MACRO_ASPECT_8; + break; + } +} + +static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int x, int y, int atomic) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_framebuffer *amdgpu_fb; + struct drm_framebuffer *target_fb; + struct drm_gem_object *obj; + struct amdgpu_bo *rbo; + uint64_t fb_location, tiling_flags; + uint32_t fb_format, fb_pitch_pixels; + unsigned bankw, bankh, 
mtaspect, tile_split; + u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE); + /* XXX change to VI */ + u32 pipe_config = (adev->gfx.config.tile_mode_array[10] >> 6) & 0x1f; + u32 tmp, viewport_w, viewport_h; + int r; + bool bypass_lut = false; + + /* no fb bound */ + if (!atomic && !crtc->primary->fb) { + DRM_DEBUG_KMS("No FB bound\n"); + return 0; + } + + if (atomic) { + amdgpu_fb = to_amdgpu_framebuffer(fb); + target_fb = fb; + } + else { + amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); + target_fb = crtc->primary->fb; + } + + /* If atomic, assume fb object is pinned & idle & fenced and + * just update base pointers + */ + obj = amdgpu_fb->obj; + rbo = gem_to_amdgpu_bo(obj); + r = amdgpu_bo_reserve(rbo, false); + if (unlikely(r != 0)) + return r; + + if (atomic) + fb_location = amdgpu_bo_gpu_offset(rbo); + else { + r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location); + if (unlikely(r != 0)) { + amdgpu_bo_unreserve(rbo); + return -EINVAL; + } + } + + amdgpu_bo_get_tiling_flags(rbo, &tiling_flags); + amdgpu_bo_unreserve(rbo); + + switch (target_fb->pixel_format) { + case DRM_FORMAT_C8: + fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 0); + fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0); + break; + case DRM_FORMAT_XRGB4444: + case DRM_FORMAT_ARGB4444: + fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1); + fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 2); +#ifdef __BIG_ENDIAN + fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, + ENDIAN_8IN16); +#endif + break; + case DRM_FORMAT_XRGB1555: + case DRM_FORMAT_ARGB1555: + fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1); + fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0); +#ifdef __BIG_ENDIAN + fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, + ENDIAN_8IN16); +#endif + break; + case DRM_FORMAT_BGRX5551: + case DRM_FORMAT_BGRA5551: + fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1); + fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 5); +#ifdef __BIG_ENDIAN + fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, + ENDIAN_8IN16); +#endif + break; + case DRM_FORMAT_RGB565: + fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1); + fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1); +#ifdef __BIG_ENDIAN + fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, + ENDIAN_8IN16); +#endif + break; + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_ARGB8888: + fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2); + fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0); +#ifdef __BIG_ENDIAN + fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, + ENDIAN_8IN32); +#endif + break; + case DRM_FORMAT_XRGB2101010: + case DRM_FORMAT_ARGB2101010: + fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2); + fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1); +#ifdef __BIG_ENDIAN + fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, + ENDIAN_8IN32); +#endif + /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */ + bypass_lut = true; + break; + case DRM_FORMAT_BGRX1010102: + case DRM_FORMAT_BGRA1010102: + fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2); + fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 4); +#ifdef __BIG_ENDIAN + fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, + ENDIAN_8IN32); +#endif + /* Greater 8 bpc fb needs 
to bypass hw-lut to retain precision */ + bypass_lut = true; + break; + default: + DRM_ERROR("Unsupported screen format %s\n", + drm_get_format_name(target_fb->pixel_format)); + return -EINVAL; + } + + if (tiling_flags & AMDGPU_TILING_MACRO) { + unsigned tileb, index, num_banks, tile_split_bytes; + + dce_v10_0_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split); + /* Set NUM_BANKS. */ + /* Calculate the macrotile mode index. */ + tile_split_bytes = 64 << tile_split; + tileb = 8 * 8 * target_fb->bits_per_pixel / 8; + tileb = min(tile_split_bytes, tileb); + + for (index = 0; tileb > 64; index++) { + tileb >>= 1; + } + + if (index >= 16) { + DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n", + target_fb->bits_per_pixel, tile_split); + return -EINVAL; + } + + num_banks = (adev->gfx.config.macrotile_mode_array[index] >> 6) & 0x3; + fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_NUM_BANKS, num_banks); + fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE, + ARRAY_2D_TILED_THIN1); + fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_TILE_SPLIT, + tile_split); + fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_WIDTH, bankw); + fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_HEIGHT, bankh); + fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MACRO_TILE_ASPECT, + mtaspect); + fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MICRO_TILE_MODE, + ADDR_SURF_MICRO_TILING_DISPLAY); + } else if (tiling_flags & AMDGPU_TILING_MICRO) { + fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE, + ARRAY_1D_TILED_THIN1); + } + + /* Read the pipe config from the 2D TILED SCANOUT mode. + * It should be the same for the other modes too, but not all + * modes set the pipe config field. */ + fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_PIPE_CONFIG, + pipe_config); + + dce_v10_0_vga_enable(crtc, false); + + WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, + upper_32_bits(fb_location)); + WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, + upper_32_bits(fb_location)); + WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, + (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK); + WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, + (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK); + WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format); + WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap); + + /* + * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT + * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to + * retain the full precision throughout the pipeline. 
+ */ + tmp = RREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset); + if (bypass_lut) + tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 1); + else + tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 0); + WREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset, tmp); + + if (bypass_lut) + DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n"); + + WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0); + WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0); + WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0); + WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0); + WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width); + WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height); + + fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8); + WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels); + + dce_v10_0_grph_enable(crtc, true); + + WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset, + target_fb->height); + + x &= ~3; + y &= ~1; + WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset, + (x << 16) | y); + viewport_w = crtc->mode.hdisplay; + viewport_h = (crtc->mode.vdisplay + 1) & ~1; + WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset, + (viewport_w << 16) | viewport_h); + + /* pageflip setup */ + /* make sure flip is at vb rather than hb */ + tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset); + tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL, + GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0); + WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp); + + /* set pageflip to happen only at start of vblank interval (front porch) */ + WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3); + + if (!atomic && fb && fb != crtc->primary->fb) { + amdgpu_fb = to_amdgpu_framebuffer(fb); + rbo = gem_to_amdgpu_bo(amdgpu_fb->obj); + r = amdgpu_bo_reserve(rbo, false); + if (unlikely(r != 0)) + return r; + amdgpu_bo_unpin(rbo); + amdgpu_bo_unreserve(rbo); + } + + /* Bytes per pixel may have changed */ + dce_v10_0_bandwidth_update(adev); + + return 0; +} + +static void dce_v10_0_set_interleave(struct drm_crtc *crtc, + struct drm_display_mode *mode) +{ + struct drm_device *dev = crtc->dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + u32 tmp; + + tmp = RREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset); + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 1); + else + tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 0); + WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, tmp); +} + +static void dce_v10_0_crtc_load_lut(struct drm_crtc *crtc) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct amdgpu_device *adev = dev->dev_private; + int i; + u32 tmp; + + DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id); + + tmp = RREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset); + tmp = REG_SET_FIELD(tmp, INPUT_CSC_CONTROL, INPUT_CSC_GRPH_MODE, 0); + tmp = REG_SET_FIELD(tmp, INPUT_CSC_CONTROL, INPUT_CSC_OVL_MODE, 0); + WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp); + + tmp = RREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset); + tmp = REG_SET_FIELD(tmp, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_BYPASS, 1); + WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset, tmp); + + tmp = RREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset); + tmp = REG_SET_FIELD(tmp, 
PRESCALE_OVL_CONTROL, OVL_PRESCALE_BYPASS, 1); + WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset, tmp); + + tmp = RREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset); + tmp = REG_SET_FIELD(tmp, INPUT_GAMMA_CONTROL, GRPH_INPUT_GAMMA_MODE, 0); + tmp = REG_SET_FIELD(tmp, INPUT_GAMMA_CONTROL, OVL_INPUT_GAMMA_MODE, 0); + WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp); + + WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0); + + WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0); + WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0); + WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0); + + WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff); + WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff); + WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff); + + WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0); + WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007); + + WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0); + for (i = 0; i < 256; i++) { + WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset, + (amdgpu_crtc->lut_r[i] << 20) | + (amdgpu_crtc->lut_g[i] << 10) | + (amdgpu_crtc->lut_b[i] << 0)); + } + + tmp = RREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset); + tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, GRPH_DEGAMMA_MODE, 0); + tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, OVL_DEGAMMA_MODE, 0); + tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR_DEGAMMA_MODE, 0); + WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp); + + tmp = RREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset); + tmp = REG_SET_FIELD(tmp, GAMUT_REMAP_CONTROL, GRPH_GAMUT_REMAP_MODE, 0); + tmp = REG_SET_FIELD(tmp, GAMUT_REMAP_CONTROL, OVL_GAMUT_REMAP_MODE, 0); + WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset, tmp); + + tmp = RREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset); + tmp = REG_SET_FIELD(tmp, REGAMMA_CONTROL, GRPH_REGAMMA_MODE, 0); + tmp = REG_SET_FIELD(tmp, REGAMMA_CONTROL, OVL_REGAMMA_MODE, 0); + WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp); + + tmp = RREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset); + tmp = REG_SET_FIELD(tmp, OUTPUT_CSC_CONTROL, OUTPUT_CSC_GRPH_MODE, 0); + tmp = REG_SET_FIELD(tmp, OUTPUT_CSC_CONTROL, OUTPUT_CSC_OVL_MODE, 0); + WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp); + + /* XXX match this to the depth of the crtc fmt block, move to modeset? 
*/ + WREG32(mmDENORM_CONTROL + amdgpu_crtc->crtc_offset, 0); + /* XXX this only needs to be programmed once per crtc at startup, + * not sure where the best place for it is + */ + tmp = RREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset); + tmp = REG_SET_FIELD(tmp, ALPHA_CONTROL, CURSOR_ALPHA_BLND_ENA, 1); + WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset, tmp); +} + +static int dce_v10_0_pick_dig_encoder(struct drm_encoder *encoder) +{ + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; + + switch (amdgpu_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: + if (dig->linkb) + return 1; + else + return 0; + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: + if (dig->linkb) + return 3; + else + return 2; + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + if (dig->linkb) + return 5; + else + return 4; + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: + return 6; + break; + default: + DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id); + return 0; + } +} + +/** + * dce_v10_0_pick_pll - Allocate a PPLL for use by the crtc. + * + * @crtc: drm crtc + * + * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors + * a single PPLL can be used for all DP crtcs/encoders. For non-DP + * monitors a dedicated PPLL must be used. If a particular board has + * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming + * as there is no need to program the PLL itself. If we are not able to + * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to + * avoid messing up an existing monitor. + * + * Asic specific PLL information + * + * DCE 10.x + * Tonga + * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) + * CI + * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC + * + */ +static u32 dce_v10_0_pick_pll(struct drm_crtc *crtc) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct amdgpu_device *adev = dev->dev_private; + u32 pll_in_use; + int pll; + + if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) { + if (adev->clock.dp_extclk) + /* skip PPLL programming if using ext clock */ + return ATOM_PPLL_INVALID; + else { + /* use the same PPLL for all DP monitors */ + pll = amdgpu_pll_get_shared_dp_ppll(crtc); + if (pll != ATOM_PPLL_INVALID) + return pll; + } + } else { + /* use the same PPLL for all monitors with the same clock */ + pll = amdgpu_pll_get_shared_nondp_ppll(crtc); + if (pll != ATOM_PPLL_INVALID) + return pll; + } + + /* DCE10 has PPLL0, PPLL1, and PPLL2 */ + pll_in_use = amdgpu_pll_get_use_mask(crtc); + if (!(pll_in_use & (1 << ATOM_PPLL2))) + return ATOM_PPLL2; + if (!(pll_in_use & (1 << ATOM_PPLL1))) + return ATOM_PPLL1; + if (!(pll_in_use & (1 << ATOM_PPLL0))) + return ATOM_PPLL0; + DRM_ERROR("unable to allocate a PPLL\n"); + return ATOM_PPLL_INVALID; +} + +static void dce_v10_0_lock_cursor(struct drm_crtc *crtc, bool lock) +{ + struct amdgpu_device *adev = crtc->dev->dev_private; + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + uint32_t cur_lock; + + cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset); + if (lock) + cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 1); + else + cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 0); + WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock); +} + +static void dce_v10_0_hide_cursor(struct drm_crtc *crtc) 
+{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct amdgpu_device *adev = crtc->dev->dev_private; + u32 tmp; + + tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset); + tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0); + WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp); +} + +static void dce_v10_0_show_cursor(struct drm_crtc *crtc) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct amdgpu_device *adev = crtc->dev->dev_private; + u32 tmp; + + tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset); + tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1); + tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2); + WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp); +} + +static void dce_v10_0_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj, + uint64_t gpu_addr) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct amdgpu_device *adev = crtc->dev->dev_private; + + WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, + upper_32_bits(gpu_addr)); + WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, + lower_32_bits(gpu_addr)); +} + +static int dce_v10_0_crtc_cursor_move(struct drm_crtc *crtc, + int x, int y) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct amdgpu_device *adev = crtc->dev->dev_private; + int xorigin = 0, yorigin = 0; + + /* avivo cursor are offset into the total surface */ + x += crtc->x; + y += crtc->y; + DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); + + if (x < 0) { + xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1); + x = 0; + } + if (y < 0) { + yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1); + y = 0; + } + + dce_v10_0_lock_cursor(crtc, true); + WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); + WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); + WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, + ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1)); + dce_v10_0_lock_cursor(crtc, false); + + return 0; +} + +static int dce_v10_0_crtc_cursor_set(struct drm_crtc *crtc, + struct drm_file *file_priv, + uint32_t handle, + uint32_t width, + uint32_t height) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct drm_gem_object *obj; + struct amdgpu_bo *robj; + uint64_t gpu_addr; + int ret; + + if (!handle) { + /* turn off cursor */ + dce_v10_0_hide_cursor(crtc); + obj = NULL; + goto unpin; + } + + if ((width > amdgpu_crtc->max_cursor_width) || + (height > amdgpu_crtc->max_cursor_height)) { + DRM_ERROR("bad cursor width or height %d x %d\n", width, height); + return -EINVAL; + } + + obj = drm_gem_object_lookup(crtc->dev, file_priv, handle); + if (!obj) { + DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id); + return -ENOENT; + } + + robj = gem_to_amdgpu_bo(obj); + ret = amdgpu_bo_reserve(robj, false); + if (unlikely(ret != 0)) + goto fail; + ret = amdgpu_bo_pin_restricted(robj, AMDGPU_GEM_DOMAIN_VRAM, + 0, &gpu_addr); + amdgpu_bo_unreserve(robj); + if (ret) + goto fail; + + amdgpu_crtc->cursor_width = width; + amdgpu_crtc->cursor_height = height; + + dce_v10_0_lock_cursor(crtc, true); + dce_v10_0_set_cursor(crtc, obj, gpu_addr); + dce_v10_0_show_cursor(crtc); + dce_v10_0_lock_cursor(crtc, false); + +unpin: + if (amdgpu_crtc->cursor_bo) { + robj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); + ret = amdgpu_bo_reserve(robj, false); + if (likely(ret == 0)) { + amdgpu_bo_unpin(robj); + amdgpu_bo_unreserve(robj); + } + 
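/*
 * Editor's illustrative sketch (not part of this patch): when the cursor
 * is dragged partly off the top/left edge, dce_v10_0_crtc_cursor_move()
 * above clamps the on-screen position to 0 and pushes the overhang into
 * the hot-spot registers (xorigin/yorigin) instead.  The stand-alone demo
 * below repeats that clamping arithmetic for a 128x128 cursor; the
 * function name is invented for the example.
 */
#include <stdio.h>

static void demo_clamp_cursor(int x, int y, int max_w, int max_h)
{
        int xorigin = 0, yorigin = 0;

        if (x < 0) {
                xorigin = -x < max_w - 1 ? -x : max_w - 1;
                x = 0;
        }
        if (y < 0) {
                yorigin = -y < max_h - 1 ? -y : max_h - 1;
                y = 0;
        }
        printf("CUR_POSITION=(%d,%d) CUR_HOT_SPOT=(%d,%d)\n",
               x, y, xorigin, yorigin);
}

int main(void)
{
        demo_clamp_cursor(-20, 5, 128, 128);   /* partly off the left edge */
        demo_clamp_cursor(-200, -3, 128, 128); /* clamped to max - 1 */
        return 0;
}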
drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo); + } + + amdgpu_crtc->cursor_bo = obj; + return 0; +fail: + drm_gem_object_unreference_unlocked(obj); + + return ret; +} + +static void dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, uint32_t start, uint32_t size) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + int end = (start + size > 256) ? 256 : start + size, i; + + /* userspace palettes are always correct as is */ + for (i = start; i < end; i++) { + amdgpu_crtc->lut_r[i] = red[i] >> 6; + amdgpu_crtc->lut_g[i] = green[i] >> 6; + amdgpu_crtc->lut_b[i] = blue[i] >> 6; + } + dce_v10_0_crtc_load_lut(crtc); +} + +static void dce_v10_0_crtc_destroy(struct drm_crtc *crtc) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + + drm_crtc_cleanup(crtc); + destroy_workqueue(amdgpu_crtc->pflip_queue); + kfree(amdgpu_crtc); +} + +static const struct drm_crtc_funcs dce_v10_0_crtc_funcs = { + .cursor_set = dce_v10_0_crtc_cursor_set, + .cursor_move = dce_v10_0_crtc_cursor_move, + .gamma_set = dce_v10_0_crtc_gamma_set, + .set_config = amdgpu_crtc_set_config, + .destroy = dce_v10_0_crtc_destroy, + .page_flip = amdgpu_crtc_page_flip, +}; + +static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode) +{ + struct drm_device *dev = crtc->dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + + switch (mode) { + case DRM_MODE_DPMS_ON: + amdgpu_crtc->enabled = true; + amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE); + dce_v10_0_vga_enable(crtc, true); + amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); + dce_v10_0_vga_enable(crtc, false); + drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); + dce_v10_0_crtc_load_lut(crtc); + break; + case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_SUSPEND: + case DRM_MODE_DPMS_OFF: + drm_vblank_pre_modeset(dev, amdgpu_crtc->crtc_id); + if (amdgpu_crtc->enabled) { + dce_v10_0_vga_enable(crtc, true); + amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE); + dce_v10_0_vga_enable(crtc, false); + } + amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE); + amdgpu_crtc->enabled = false; + break; + } + /* adjust pm to dpms */ + amdgpu_pm_compute_clocks(adev); +} + +static void dce_v10_0_crtc_prepare(struct drm_crtc *crtc) +{ + /* disable crtc pair power gating before programming */ + amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE); + amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE); + dce_v10_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); +} + +static void dce_v10_0_crtc_commit(struct drm_crtc *crtc) +{ + dce_v10_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON); + amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE); +} + +static void dce_v10_0_crtc_disable(struct drm_crtc *crtc) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_atom_ss ss; + int i; + + dce_v10_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); + if (crtc->primary->fb) { + int r; + struct amdgpu_framebuffer *amdgpu_fb; + struct amdgpu_bo *rbo; + + amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); + rbo = gem_to_amdgpu_bo(amdgpu_fb->obj); + r = amdgpu_bo_reserve(rbo, false); + if (unlikely(r)) + DRM_ERROR("failed to reserve rbo before unpin\n"); + else { + amdgpu_bo_unpin(rbo); + amdgpu_bo_unreserve(rbo); + } + } + /* disable the GRPH */ + dce_v10_0_grph_enable(crtc, false); + + amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE); + + for (i = 0; i < adev->mode_info.num_crtc; i++) { + if (adev->mode_info.crtcs[i] && + 
adev->mode_info.crtcs[i]->enabled && + i != amdgpu_crtc->crtc_id && + amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) { + /* one other crtc is using this pll don't turn + * off the pll + */ + goto done; + } + } + + switch (amdgpu_crtc->pll_id) { + case ATOM_PPLL0: + case ATOM_PPLL1: + case ATOM_PPLL2: + /* disable the ppll */ + amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id, + 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss); + break; + default: + break; + } +done: + amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; + amdgpu_crtc->adjusted_clock = 0; + amdgpu_crtc->encoder = NULL; + amdgpu_crtc->connector = NULL; +} + +static int dce_v10_0_crtc_mode_set(struct drm_crtc *crtc, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode, + int x, int y, struct drm_framebuffer *old_fb) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + + if (!amdgpu_crtc->adjusted_clock) + return -EINVAL; + + amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode); + amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode); + dce_v10_0_crtc_do_set_base(crtc, old_fb, x, y, 0); + amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode); + amdgpu_atombios_crtc_scaler_setup(crtc); + /* update the hw version fpr dpm */ + amdgpu_crtc->hw_mode = *adjusted_mode; + + return 0; +} + +static bool dce_v10_0_crtc_mode_fixup(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct drm_encoder *encoder; + + /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */ + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + if (encoder->crtc == crtc) { + amdgpu_crtc->encoder = encoder; + amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder); + break; + } + } + if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) { + amdgpu_crtc->encoder = NULL; + amdgpu_crtc->connector = NULL; + return false; + } + if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) + return false; + if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode)) + return false; + /* pick pll */ + amdgpu_crtc->pll_id = dce_v10_0_pick_pll(crtc); + /* if we can't get a PPLL for a non-DP encoder, fail */ + if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) && + !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) + return false; + + return true; +} + +static int dce_v10_0_crtc_set_base(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *old_fb) +{ + return dce_v10_0_crtc_do_set_base(crtc, old_fb, x, y, 0); +} + +static int dce_v10_0_crtc_set_base_atomic(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int x, int y, enum mode_set_atomic state) +{ + return dce_v10_0_crtc_do_set_base(crtc, fb, x, y, 1); +} + +static const struct drm_crtc_helper_funcs dce_v10_0_crtc_helper_funcs = { + .dpms = dce_v10_0_crtc_dpms, + .mode_fixup = dce_v10_0_crtc_mode_fixup, + .mode_set = dce_v10_0_crtc_mode_set, + .mode_set_base = dce_v10_0_crtc_set_base, + .mode_set_base_atomic = dce_v10_0_crtc_set_base_atomic, + .prepare = dce_v10_0_crtc_prepare, + .commit = dce_v10_0_crtc_commit, + .load_lut = dce_v10_0_crtc_load_lut, + .disable = dce_v10_0_crtc_disable, +}; + +static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index) +{ + struct amdgpu_crtc *amdgpu_crtc; + int i; + + amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) + + (AMDGPUFB_CONN_LIMIT * 
sizeof(struct drm_connector *)), GFP_KERNEL); + if (amdgpu_crtc == NULL) + return -ENOMEM; + + drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v10_0_crtc_funcs); + + drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256); + amdgpu_crtc->crtc_id = index; + amdgpu_crtc->pflip_queue = create_singlethread_workqueue("amdgpu-pageflip-queue"); + adev->mode_info.crtcs[index] = amdgpu_crtc; + + amdgpu_crtc->max_cursor_width = 128; + amdgpu_crtc->max_cursor_height = 128; + adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width; + adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height; + + for (i = 0; i < 256; i++) { + amdgpu_crtc->lut_r[i] = i << 2; + amdgpu_crtc->lut_g[i] = i << 2; + amdgpu_crtc->lut_b[i] = i << 2; + } + + switch (amdgpu_crtc->crtc_id) { + case 0: + default: + amdgpu_crtc->crtc_offset = CRTC0_REGISTER_OFFSET; + break; + case 1: + amdgpu_crtc->crtc_offset = CRTC1_REGISTER_OFFSET; + break; + case 2: + amdgpu_crtc->crtc_offset = CRTC2_REGISTER_OFFSET; + break; + case 3: + amdgpu_crtc->crtc_offset = CRTC3_REGISTER_OFFSET; + break; + case 4: + amdgpu_crtc->crtc_offset = CRTC4_REGISTER_OFFSET; + break; + case 5: + amdgpu_crtc->crtc_offset = CRTC5_REGISTER_OFFSET; + break; + } + + amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; + amdgpu_crtc->adjusted_clock = 0; + amdgpu_crtc->encoder = NULL; + amdgpu_crtc->connector = NULL; + drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v10_0_crtc_helper_funcs); + + return 0; +} + +static int dce_v10_0_early_init(struct amdgpu_device *adev) +{ + adev->audio_endpt_rreg = &dce_v10_0_audio_endpt_rreg; + adev->audio_endpt_wreg = &dce_v10_0_audio_endpt_wreg; + + dce_v10_0_set_display_funcs(adev); + dce_v10_0_set_irq_funcs(adev); + + switch (adev->asic_type) { + case CHIP_TONGA: + adev->mode_info.num_crtc = 6; /* XXX 7??? 
*/ + adev->mode_info.num_hpd = 6; + adev->mode_info.num_dig = 7; + break; + default: + /* FIXME: not supported yet */ + return -EINVAL; + } + + return 0; +} + +static int dce_v10_0_sw_init(struct amdgpu_device *adev) +{ + int r, i; + + for (i = 0; i < adev->mode_info.num_crtc; i++) { + r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq); + if (r) + return r; + } + + for (i = 8; i < 20; i += 2) { + r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq); + if (r) + return r; + } + + /* HPD hotplug */ + r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq); + if (r) + return r; + + adev->mode_info.mode_config_initialized = true; + + adev->ddev->mode_config.funcs = &amdgpu_mode_funcs; + + adev->ddev->mode_config.max_width = 16384; + adev->ddev->mode_config.max_height = 16384; + + adev->ddev->mode_config.preferred_depth = 24; + adev->ddev->mode_config.prefer_shadow = 1; + + adev->ddev->mode_config.fb_base = adev->mc.aper_base; + + r = amdgpu_modeset_create_props(adev); + if (r) + return r; + + adev->ddev->mode_config.max_width = 16384; + adev->ddev->mode_config.max_height = 16384; + + /* allocate crtcs */ + for (i = 0; i < adev->mode_info.num_crtc; i++) { + r = dce_v10_0_crtc_init(adev, i); + if (r) + return r; + } + + if (amdgpu_atombios_get_connector_info_from_object_table(adev)) + amdgpu_print_display_setup(adev->ddev); + else + return -EINVAL; + + /* setup afmt */ + dce_v10_0_afmt_init(adev); + + r = dce_v10_0_audio_init(adev); + if (r) + return r; + + drm_kms_helper_poll_init(adev->ddev); + + return r; +} + +static int dce_v10_0_sw_fini(struct amdgpu_device *adev) +{ + kfree(adev->mode_info.bios_hardcoded_edid); + + drm_kms_helper_poll_fini(adev->ddev); + + dce_v10_0_audio_fini(adev); + + dce_v10_0_afmt_fini(adev); + + drm_mode_config_cleanup(adev->ddev); + adev->mode_info.mode_config_initialized = false; + + return 0; +} + +static int dce_v10_0_hw_init(struct amdgpu_device *adev) +{ + int i; + + dce_v10_0_init_golden_registers(adev); + + /* init dig PHYs, disp eng pll */ + amdgpu_atombios_encoder_init_dig(adev); + amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk); + + /* initialize hpd */ + dce_v10_0_hpd_init(adev); + + for (i = 0; i < adev->mode_info.audio.num_pins; i++) { + dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); + } + + return 0; +} + +static int dce_v10_0_hw_fini(struct amdgpu_device *adev) +{ + int i; + + dce_v10_0_hpd_fini(adev); + + for (i = 0; i < adev->mode_info.audio.num_pins; i++) { + dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); + } + + return 0; +} + +static int dce_v10_0_suspend(struct amdgpu_device *adev) +{ + struct drm_connector *connector; + + drm_kms_helper_poll_disable(adev->ddev); + + /* turn off display hw */ + list_for_each_entry(connector, &adev->ddev->mode_config.connector_list, head) { + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); + } + + amdgpu_atombios_scratch_regs_save(adev); + + dce_v10_0_hpd_fini(adev); + + return 0; +} + +static int dce_v10_0_resume(struct amdgpu_device *adev) +{ + struct drm_connector *connector; + + dce_v10_0_init_golden_registers(adev); + + amdgpu_atombios_scratch_regs_restore(adev); + + /* init dig PHYs, disp eng pll */ + amdgpu_atombios_encoder_init_dig(adev); + amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk); + /* turn on the BL */ + if (adev->mode_info.bl_encoder) { + u8 bl_level = amdgpu_display_backlight_get_level(adev, + adev->mode_info.bl_encoder); + amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder, + 
bl_level); + } + + /* initialize hpd */ + dce_v10_0_hpd_init(adev); + + /* blat the mode back in */ + drm_helper_resume_force_mode(adev->ddev); + /* turn on display hw */ + list_for_each_entry(connector, &adev->ddev->mode_config.connector_list, head) { + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); + } + + drm_kms_helper_poll_enable(adev->ddev); + + return 0; +} + +static bool dce_v10_0_is_idle(struct amdgpu_device *adev) +{ + /* XXX todo */ + return true; +} + +static int dce_v10_0_wait_for_idle(struct amdgpu_device *adev) +{ + /* XXX todo */ + return 0; +} + +static void dce_v10_0_print_status(struct amdgpu_device *adev) +{ + dev_info(adev->dev, "DCE 10.x registers\n"); + /* XXX todo */ +} + +static int dce_v10_0_soft_reset(struct amdgpu_device *adev) +{ + u32 srbm_soft_reset = 0, tmp; + + if (dce_v10_0_is_display_hung(adev)) + srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK; + + if (srbm_soft_reset) { + dce_v10_0_print_status(adev); + + tmp = RREG32(mmSRBM_SOFT_RESET); + tmp |= srbm_soft_reset; + dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + udelay(50); + + tmp &= ~srbm_soft_reset; + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + /* Wait a little for things to settle down */ + udelay(50); + dce_v10_0_print_status(adev); + } + return 0; +} + +static void dce_v10_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev, + int crtc, + enum amdgpu_interrupt_state state) +{ + u32 lb_interrupt_mask; + + if (crtc >= adev->mode_info.num_crtc) { + DRM_DEBUG("invalid crtc %d\n", crtc); + return; + } + + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]); + lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK, + VBLANK_INTERRUPT_MASK, 0); + WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask); + break; + case AMDGPU_IRQ_STATE_ENABLE: + lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]); + lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK, + VBLANK_INTERRUPT_MASK, 1); + WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask); + break; + default: + break; + } +} + +static void dce_v10_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev, + int crtc, + enum amdgpu_interrupt_state state) +{ + u32 lb_interrupt_mask; + + if (crtc >= adev->mode_info.num_crtc) { + DRM_DEBUG("invalid crtc %d\n", crtc); + return; + } + + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]); + lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK, + VLINE_INTERRUPT_MASK, 0); + WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask); + break; + case AMDGPU_IRQ_STATE_ENABLE: + lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]); + lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK, + VLINE_INTERRUPT_MASK, 1); + WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask); + break; + default: + break; + } +} + +static int dce_v10_0_set_hpd_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned hpd, + enum amdgpu_interrupt_state state) +{ + u32 tmp; + + if (hpd >= adev->mode_info.num_hpd) { + DRM_DEBUG("invalid hdp %d\n", hpd); + return 0; + } + + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]); + tmp = REG_SET_FIELD(tmp, 
DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0); + WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp); + break; + case AMDGPU_IRQ_STATE_ENABLE: + tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]); + tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 1); + WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp); + break; + default: + break; + } + + return 0; +} + +static int dce_v10_0_set_crtc_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned type, + enum amdgpu_interrupt_state state) +{ + switch (type) { + case AMDGPU_CRTC_IRQ_VBLANK1: + dce_v10_0_set_crtc_vblank_interrupt_state(adev, 0, state); + break; + case AMDGPU_CRTC_IRQ_VBLANK2: + dce_v10_0_set_crtc_vblank_interrupt_state(adev, 1, state); + break; + case AMDGPU_CRTC_IRQ_VBLANK3: + dce_v10_0_set_crtc_vblank_interrupt_state(adev, 2, state); + break; + case AMDGPU_CRTC_IRQ_VBLANK4: + dce_v10_0_set_crtc_vblank_interrupt_state(adev, 3, state); + break; + case AMDGPU_CRTC_IRQ_VBLANK5: + dce_v10_0_set_crtc_vblank_interrupt_state(adev, 4, state); + break; + case AMDGPU_CRTC_IRQ_VBLANK6: + dce_v10_0_set_crtc_vblank_interrupt_state(adev, 5, state); + break; + case AMDGPU_CRTC_IRQ_VLINE1: + dce_v10_0_set_crtc_vline_interrupt_state(adev, 0, state); + break; + case AMDGPU_CRTC_IRQ_VLINE2: + dce_v10_0_set_crtc_vline_interrupt_state(adev, 1, state); + break; + case AMDGPU_CRTC_IRQ_VLINE3: + dce_v10_0_set_crtc_vline_interrupt_state(adev, 2, state); + break; + case AMDGPU_CRTC_IRQ_VLINE4: + dce_v10_0_set_crtc_vline_interrupt_state(adev, 3, state); + break; + case AMDGPU_CRTC_IRQ_VLINE5: + dce_v10_0_set_crtc_vline_interrupt_state(adev, 4, state); + break; + case AMDGPU_CRTC_IRQ_VLINE6: + dce_v10_0_set_crtc_vline_interrupt_state(adev, 5, state); + break; + default: + break; + } + return 0; +} + +static int dce_v10_0_set_pageflip_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned type, + enum amdgpu_interrupt_state state) +{ + u32 reg, reg_block; + /* now deal with page flip IRQ */ + switch (type) { + case AMDGPU_PAGEFLIP_IRQ_D1: + reg_block = CRTC0_REGISTER_OFFSET; + break; + case AMDGPU_PAGEFLIP_IRQ_D2: + reg_block = CRTC1_REGISTER_OFFSET; + break; + case AMDGPU_PAGEFLIP_IRQ_D3: + reg_block = CRTC2_REGISTER_OFFSET; + break; + case AMDGPU_PAGEFLIP_IRQ_D4: + reg_block = CRTC3_REGISTER_OFFSET; + break; + case AMDGPU_PAGEFLIP_IRQ_D5: + reg_block = CRTC4_REGISTER_OFFSET; + break; + case AMDGPU_PAGEFLIP_IRQ_D6: + reg_block = CRTC5_REGISTER_OFFSET; + break; + default: + DRM_ERROR("invalid pageflip crtc %d\n", type); + return -EINVAL; + } + + reg = RREG32(mmGRPH_INTERRUPT_CONTROL + reg_block); + if (state == AMDGPU_IRQ_STATE_DISABLE) + WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); + else + WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); + + return 0; +} + +static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + int reg_block; + unsigned long flags; + unsigned crtc_id; + struct amdgpu_crtc *amdgpu_crtc; + struct amdgpu_flip_work *works; + + crtc_id = (entry->src_id - 8) >> 1; + amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; + + /* ack the interrupt */ + switch(crtc_id){ + case AMDGPU_PAGEFLIP_IRQ_D1: + reg_block = CRTC0_REGISTER_OFFSET; + break; + case AMDGPU_PAGEFLIP_IRQ_D2: + reg_block = CRTC1_REGISTER_OFFSET; + break; + case AMDGPU_PAGEFLIP_IRQ_D3: + reg_block = CRTC2_REGISTER_OFFSET; + break; + case 
AMDGPU_PAGEFLIP_IRQ_D4: + reg_block = CRTC3_REGISTER_OFFSET; + break; + case AMDGPU_PAGEFLIP_IRQ_D5: + reg_block = CRTC4_REGISTER_OFFSET; + break; + case AMDGPU_PAGEFLIP_IRQ_D6: + reg_block = CRTC5_REGISTER_OFFSET; + break; + default: + DRM_ERROR("invalid pageflip crtc %d\n", crtc_id); + return -EINVAL; + } + + if (RREG32(mmGRPH_INTERRUPT_STATUS + reg_block) & GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK) + WREG32(mmGRPH_INTERRUPT_STATUS + reg_block, GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK); + + /* IRQ could occur when in initial stage */ + if (amdgpu_crtc == NULL) + return 0; + + spin_lock_irqsave(&adev->ddev->event_lock, flags); + works = amdgpu_crtc->pflip_works; + if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) { + DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != " + "AMDGPU_FLIP_SUBMITTED(%d)\n", + amdgpu_crtc->pflip_status, + AMDGPU_FLIP_SUBMITTED); + spin_unlock_irqrestore(&adev->ddev->event_lock, flags); + return 0; + } + + /* page flip completed. clean up */ + amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE; + amdgpu_crtc->pflip_works = NULL; + + /* wakeup usersapce */ + if (works->event) + drm_send_vblank_event(adev->ddev, crtc_id, works->event); + + spin_unlock_irqrestore(&adev->ddev->event_lock, flags); + + drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); + amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id); + queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work); + + return 0; +} + +static void dce_v10_0_hpd_int_ack(struct amdgpu_device *adev, + int hpd) +{ + u32 tmp; + + if (hpd >= adev->mode_info.num_hpd) { + DRM_DEBUG("invalid hdp %d\n", hpd); + return; + } + + tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]); + tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_ACK, 1); + WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp); +} + +static void dce_v10_0_crtc_vblank_int_ack(struct amdgpu_device *adev, + int crtc) +{ + u32 tmp; + + if (crtc >= adev->mode_info.num_crtc) { + DRM_DEBUG("invalid crtc %d\n", crtc); + return; + } + + tmp = RREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc]); + tmp = REG_SET_FIELD(tmp, LB_VBLANK_STATUS, VBLANK_ACK, 1); + WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], tmp); +} + +static void dce_v10_0_crtc_vline_int_ack(struct amdgpu_device *adev, + int crtc) +{ + u32 tmp; + + if (crtc >= adev->mode_info.num_crtc) { + DRM_DEBUG("invalid crtc %d\n", crtc); + return; + } + + tmp = RREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc]); + tmp = REG_SET_FIELD(tmp, LB_VLINE_STATUS, VLINE_ACK, 1); + WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], tmp); +} + +static int dce_v10_0_crtc_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + unsigned crtc = entry->src_id - 1; + uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg); + unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc); + + switch (entry->src_data) { + case 0: /* vblank */ + if (disp_int & interrupt_status_offsets[crtc].vblank) { + dce_v10_0_crtc_vblank_int_ack(adev, crtc); + if (amdgpu_irq_enabled(adev, source, irq_type)) { + drm_handle_vblank(adev->ddev, crtc); + } + DRM_DEBUG("IH: D%d vblank\n", crtc + 1); + } + break; + case 1: /* vline */ + if (disp_int & interrupt_status_offsets[crtc].vline) { + dce_v10_0_crtc_vline_int_ack(adev, crtc); + DRM_DEBUG("IH: D%d vline\n", crtc + 1); + } + break; + default: + DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data); + break; + } + + return 0; +} + +static int dce_v10_0_hpd_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + 
struct amdgpu_iv_entry *entry) +{ + uint32_t disp_int, mask; + unsigned hpd; + + if (entry->src_data >= adev->mode_info.num_hpd) { + DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data); + return 0; + } + + hpd = entry->src_data; + disp_int = RREG32(interrupt_status_offsets[hpd].reg); + mask = interrupt_status_offsets[hpd].hpd; + + if (disp_int & mask) { + dce_v10_0_hpd_int_ack(adev, hpd); + schedule_work(&adev->hotplug_work); + DRM_DEBUG("IH: HPD%d\n", hpd + 1); + } + + return 0; +} + +static int dce_v10_0_set_clockgating_state(struct amdgpu_device *adev, + enum amdgpu_clockgating_state state) +{ + return 0; +} + +static int dce_v10_0_set_powergating_state(struct amdgpu_device *adev, + enum amdgpu_powergating_state state) +{ + return 0; +} + +const struct amdgpu_ip_funcs dce_v10_0_ip_funcs = { + .early_init = dce_v10_0_early_init, + .late_init = NULL, + .sw_init = dce_v10_0_sw_init, + .sw_fini = dce_v10_0_sw_fini, + .hw_init = dce_v10_0_hw_init, + .hw_fini = dce_v10_0_hw_fini, + .suspend = dce_v10_0_suspend, + .resume = dce_v10_0_resume, + .is_idle = dce_v10_0_is_idle, + .wait_for_idle = dce_v10_0_wait_for_idle, + .soft_reset = dce_v10_0_soft_reset, + .print_status = dce_v10_0_print_status, + .set_clockgating_state = dce_v10_0_set_clockgating_state, + .set_powergating_state = dce_v10_0_set_powergating_state, +}; + +static void +dce_v10_0_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + + amdgpu_encoder->pixel_clock = adjusted_mode->clock; + + /* need to call this here rather than in prepare() since we need some crtc info */ + amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); + + /* set scaler clears this on some chips */ + dce_v10_0_set_interleave(encoder->crtc, mode); + + if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) { + dce_v10_0_afmt_enable(encoder, true); + dce_v10_0_afmt_setmode(encoder, adjusted_mode); + } +} + +static void dce_v10_0_encoder_prepare(struct drm_encoder *encoder) +{ + struct amdgpu_device *adev = encoder->dev->dev_private; + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); + + if ((amdgpu_encoder->active_device & + (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) || + (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) != + ENCODER_OBJECT_ID_NONE)) { + struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; + if (dig) { + dig->dig_encoder = dce_v10_0_pick_dig_encoder(encoder); + if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT) + dig->afmt = adev->mode_info.afmt[dig->dig_encoder]; + } + } + + amdgpu_atombios_scratch_regs_lock(adev, true); + + if (connector) { + struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); + + /* select the clock/data port if it uses a router */ + if (amdgpu_connector->router.cd_valid) + amdgpu_i2c_router_select_cd_port(amdgpu_connector); + + /* turn eDP panel on for mode set */ + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) + amdgpu_atombios_encoder_set_edp_panel_power(connector, + ATOM_TRANSMITTER_ACTION_POWER_ON); + } + + /* this is needed for the pll/ss setup to work correctly in some cases */ + amdgpu_atombios_encoder_set_crtc_source(encoder); + /* set up the FMT blocks */ + dce_v10_0_program_fmt(encoder); +} + +static void dce_v10_0_encoder_commit(struct drm_encoder *encoder) +{ + 
struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; + + /* need to call this here as we need the crtc set up */ + amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON); + amdgpu_atombios_scratch_regs_lock(adev, false); +} + +static void dce_v10_0_encoder_disable(struct drm_encoder *encoder) +{ + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct amdgpu_encoder_atom_dig *dig; + + amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); + + if (amdgpu_atombios_encoder_is_digital(encoder)) { + if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) + dce_v10_0_afmt_enable(encoder, false); + dig = amdgpu_encoder->enc_priv; + dig->dig_encoder = -1; + } + amdgpu_encoder->active_device = 0; +} + +/* these are handled by the primary encoders */ +static void dce_v10_0_ext_prepare(struct drm_encoder *encoder) +{ + +} + +static void dce_v10_0_ext_commit(struct drm_encoder *encoder) +{ + +} + +static void +dce_v10_0_ext_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + +} + +static void dce_v10_0_ext_disable(struct drm_encoder *encoder) +{ + +} + +static void +dce_v10_0_ext_dpms(struct drm_encoder *encoder, int mode) +{ + +} + +static bool dce_v10_0_ext_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +static const struct drm_encoder_helper_funcs dce_v10_0_ext_helper_funcs = { + .dpms = dce_v10_0_ext_dpms, + .mode_fixup = dce_v10_0_ext_mode_fixup, + .prepare = dce_v10_0_ext_prepare, + .mode_set = dce_v10_0_ext_mode_set, + .commit = dce_v10_0_ext_commit, + .disable = dce_v10_0_ext_disable, + /* no detect for TMDS/LVDS yet */ +}; + +static const struct drm_encoder_helper_funcs dce_v10_0_dig_helper_funcs = { + .dpms = amdgpu_atombios_encoder_dpms, + .mode_fixup = amdgpu_atombios_encoder_mode_fixup, + .prepare = dce_v10_0_encoder_prepare, + .mode_set = dce_v10_0_encoder_mode_set, + .commit = dce_v10_0_encoder_commit, + .disable = dce_v10_0_encoder_disable, + .detect = amdgpu_atombios_encoder_dig_detect, +}; + +static const struct drm_encoder_helper_funcs dce_v10_0_dac_helper_funcs = { + .dpms = amdgpu_atombios_encoder_dpms, + .mode_fixup = amdgpu_atombios_encoder_mode_fixup, + .prepare = dce_v10_0_encoder_prepare, + .mode_set = dce_v10_0_encoder_mode_set, + .commit = dce_v10_0_encoder_commit, + .detect = amdgpu_atombios_encoder_dac_detect, +}; + +static void dce_v10_0_encoder_destroy(struct drm_encoder *encoder) +{ + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) + amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder); + kfree(amdgpu_encoder->enc_priv); + drm_encoder_cleanup(encoder); + kfree(amdgpu_encoder); +} + +static const struct drm_encoder_funcs dce_v10_0_encoder_funcs = { + .destroy = dce_v10_0_encoder_destroy, +}; + +static void dce_v10_0_encoder_add(struct amdgpu_device *adev, + uint32_t encoder_enum, + uint32_t supported_device, + u16 caps) +{ + struct drm_device *dev = adev->ddev; + struct drm_encoder *encoder; + struct amdgpu_encoder *amdgpu_encoder; + + /* see if we already added it */ + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + amdgpu_encoder = to_amdgpu_encoder(encoder); + if (amdgpu_encoder->encoder_enum == encoder_enum) { + amdgpu_encoder->devices |= supported_device; + return; + } + + } + + /* add a new one */ + amdgpu_encoder = 
kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL); + if (!amdgpu_encoder) + return; + + encoder = &amdgpu_encoder->base; + switch (adev->mode_info.num_crtc) { + case 1: + encoder->possible_crtcs = 0x1; + break; + case 2: + default: + encoder->possible_crtcs = 0x3; + break; + case 4: + encoder->possible_crtcs = 0xf; + break; + case 6: + encoder->possible_crtcs = 0x3f; + break; + } + + amdgpu_encoder->enc_priv = NULL; + + amdgpu_encoder->encoder_enum = encoder_enum; + amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; + amdgpu_encoder->devices = supported_device; + amdgpu_encoder->rmx_type = RMX_OFF; + amdgpu_encoder->underscan_type = UNDERSCAN_OFF; + amdgpu_encoder->is_ext_encoder = false; + amdgpu_encoder->caps = caps; + + switch (amdgpu_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: + drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs, + DRM_MODE_ENCODER_DAC); + drm_encoder_helper_add(encoder, &dce_v10_0_dac_helper_funcs); + break; + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: + if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { + amdgpu_encoder->rmx_type = RMX_FULL; + drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs, + DRM_MODE_ENCODER_LVDS); + amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder); + } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) { + drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs, + DRM_MODE_ENCODER_DAC); + amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); + } else { + drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs, + DRM_MODE_ENCODER_TMDS); + amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); + } + drm_encoder_helper_add(encoder, &dce_v10_0_dig_helper_funcs); + break; + case ENCODER_OBJECT_ID_SI170B: + case ENCODER_OBJECT_ID_CH7303: + case ENCODER_OBJECT_ID_EXTERNAL_SDVOA: + case ENCODER_OBJECT_ID_EXTERNAL_SDVOB: + case ENCODER_OBJECT_ID_TITFP513: + case ENCODER_OBJECT_ID_VT1623: + case ENCODER_OBJECT_ID_HDMI_SI1930: + case ENCODER_OBJECT_ID_TRAVIS: + case ENCODER_OBJECT_ID_NUTMEG: + /* these are handled by the primary encoders */ + amdgpu_encoder->is_ext_encoder = true; + if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) + drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs, + DRM_MODE_ENCODER_LVDS); + else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) + drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs, + DRM_MODE_ENCODER_DAC); + else + drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs, + DRM_MODE_ENCODER_TMDS); + drm_encoder_helper_add(encoder, &dce_v10_0_ext_helper_funcs); + break; + } +} + +static const struct amdgpu_display_funcs dce_v10_0_display_funcs = { + .set_vga_render_state = &dce_v10_0_set_vga_render_state, + .bandwidth_update = &dce_v10_0_bandwidth_update, + .vblank_get_counter = &dce_v10_0_vblank_get_counter, + .vblank_wait = &dce_v10_0_vblank_wait, + .is_display_hung = &dce_v10_0_is_display_hung, + .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level, + .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level, + .hpd_sense = &dce_v10_0_hpd_sense, + .hpd_set_polarity = &dce_v10_0_hpd_set_polarity, + .hpd_get_gpio_reg = &dce_v10_0_hpd_get_gpio_reg, + .page_flip = &dce_v10_0_page_flip, + 
.page_flip_get_scanoutpos = &dce_v10_0_crtc_get_scanoutpos, + .add_encoder = &dce_v10_0_encoder_add, + .add_connector = &amdgpu_connector_add, + .stop_mc_access = &dce_v10_0_stop_mc_access, + .resume_mc_access = &dce_v10_0_resume_mc_access, +}; + +static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev) +{ + if (adev->mode_info.funcs == NULL) + adev->mode_info.funcs = &dce_v10_0_display_funcs; +} + +static const struct amdgpu_irq_src_funcs dce_v10_0_crtc_irq_funcs = { + .set = dce_v10_0_set_crtc_irq_state, + .process = dce_v10_0_crtc_irq, +}; + +static const struct amdgpu_irq_src_funcs dce_v10_0_pageflip_irq_funcs = { + .set = dce_v10_0_set_pageflip_irq_state, + .process = dce_v10_0_pageflip_irq, +}; + +static const struct amdgpu_irq_src_funcs dce_v10_0_hpd_irq_funcs = { + .set = dce_v10_0_set_hpd_irq_state, + .process = dce_v10_0_hpd_irq, +}; + +static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST; + adev->crtc_irq.funcs = &dce_v10_0_crtc_irq_funcs; + + adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST; + adev->pageflip_irq.funcs = &dce_v10_0_pageflip_irq_funcs; + + adev->hpd_irq.num_types = AMDGPU_HPD_LAST; + adev->hpd_irq.funcs = &dce_v10_0_hpd_irq_funcs; +} diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h new file mode 100644 index 000000000000..72ca20d1793c --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h @@ -0,0 +1,29 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __DCE_V10_0_H__ +#define __DCE_V10_0_H__ + +extern const struct amdgpu_ip_funcs dce_v10_0_ip_funcs; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c new file mode 100644 index 000000000000..55fef15a4fcf --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -0,0 +1,3871 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "drmP.h" +#include "amdgpu.h" +#include "amdgpu_pm.h" +#include "amdgpu_i2c.h" +#include "vid.h" +#include "atom.h" +#include "amdgpu_atombios.h" +#include "atombios_crtc.h" +#include "atombios_encoders.h" +#include "amdgpu_pll.h" +#include "amdgpu_connectors.h" + +#include "dce/dce_11_0_d.h" +#include "dce/dce_11_0_sh_mask.h" +#include "dce/dce_11_0_enum.h" +#include "oss/oss_3_0_d.h" +#include "oss/oss_3_0_sh_mask.h" +#include "gmc/gmc_8_1_d.h" +#include "gmc/gmc_8_1_sh_mask.h" + +static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev); +static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev); + +static const u32 crtc_offsets[] = +{ + CRTC0_REGISTER_OFFSET, + CRTC1_REGISTER_OFFSET, + CRTC2_REGISTER_OFFSET, + CRTC3_REGISTER_OFFSET, + CRTC4_REGISTER_OFFSET, + CRTC5_REGISTER_OFFSET, + CRTC6_REGISTER_OFFSET +}; + +static const u32 hpd_offsets[] = +{ + HPD0_REGISTER_OFFSET, + HPD1_REGISTER_OFFSET, + HPD2_REGISTER_OFFSET, + HPD3_REGISTER_OFFSET, + HPD4_REGISTER_OFFSET, + HPD5_REGISTER_OFFSET +}; + +static const uint32_t dig_offsets[] = { + DIG0_REGISTER_OFFSET, + DIG1_REGISTER_OFFSET, + DIG2_REGISTER_OFFSET, + DIG3_REGISTER_OFFSET, + DIG4_REGISTER_OFFSET, + DIG5_REGISTER_OFFSET, + DIG6_REGISTER_OFFSET, + DIG7_REGISTER_OFFSET, + DIG8_REGISTER_OFFSET +}; + +static const struct { + uint32_t reg; + uint32_t vblank; + uint32_t vline; + uint32_t hpd; + +} interrupt_status_offsets[] = { { + .reg = mmDISP_INTERRUPT_STATUS, + .vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK, + .vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK, + .hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK +}, { + .reg = mmDISP_INTERRUPT_STATUS_CONTINUE, + .vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK, + .vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK, + .hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK +}, { + .reg = mmDISP_INTERRUPT_STATUS_CONTINUE2, + .vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK, + .vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK, + .hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK +}, { + .reg = mmDISP_INTERRUPT_STATUS_CONTINUE3, + .vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK, + .vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK, + .hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK +}, { + .reg = 
mmDISP_INTERRUPT_STATUS_CONTINUE4, + .vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK, + .vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK, + .hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK +}, { + .reg = mmDISP_INTERRUPT_STATUS_CONTINUE5, + .vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK, + .vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK, + .hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK +} }; + +static const u32 cz_golden_settings_a11[] = +{ + mmCRTC_DOUBLE_BUFFER_CONTROL, 0x00010101, 0x00010000, + mmFBC_MISC, 0x1f311fff, 0x14300000, +}; + +static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev) +{ + switch (adev->asic_type) { + case CHIP_CARRIZO: + amdgpu_program_register_sequence(adev, + cz_golden_settings_a11, + (const u32)ARRAY_SIZE(cz_golden_settings_a11)); + break; + default: + break; + } +} + +static u32 dce_v11_0_audio_endpt_rreg(struct amdgpu_device *adev, + u32 block_offset, u32 reg) +{ + unsigned long flags; + u32 r; + + spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags); + WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg); + r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset); + spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags); + + return r; +} + +static void dce_v11_0_audio_endpt_wreg(struct amdgpu_device *adev, + u32 block_offset, u32 reg, u32 v) +{ + unsigned long flags; + + spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags); + WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg); + WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v); + spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags); +} + +static bool dce_v11_0_is_in_vblank(struct amdgpu_device *adev, int crtc) +{ + if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) & + CRTC_V_BLANK_START_END__CRTC_V_BLANK_START_MASK) + return true; + else + return false; +} + +static bool dce_v11_0_is_counter_moving(struct amdgpu_device *adev, int crtc) +{ + u32 pos1, pos2; + + pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]); + pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]); + + if (pos1 != pos2) + return true; + else + return false; +} + +/** + * dce_v11_0_vblank_wait - vblank wait asic callback. + * + * @adev: amdgpu_device pointer + * @crtc: crtc to wait for vblank on + * + * Wait for vblank on the requested crtc (evergreen+). + */ +static void dce_v11_0_vblank_wait(struct amdgpu_device *adev, int crtc) +{ + unsigned i = 0; + + if (crtc >= adev->mode_info.num_crtc) + return; + + if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK)) + return; + + /* depending on when we hit vblank, we may be close to active; if so, + * wait for another frame. + */ + while (dce_v11_0_is_in_vblank(adev, crtc)) { + if (i++ % 100 == 0) { + if (!dce_v11_0_is_counter_moving(adev, crtc)) + break; + } + } + + while (!dce_v11_0_is_in_vblank(adev, crtc)) { + if (i++ % 100 == 0) { + if (!dce_v11_0_is_counter_moving(adev, crtc)) + break; + } + } +} + +static u32 dce_v11_0_vblank_get_counter(struct amdgpu_device *adev, int crtc) +{ + if (crtc >= adev->mode_info.num_crtc) + return 0; + else + return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]); +} + +/** + * dce_v11_0_page_flip - pageflip callback. + * + * @adev: amdgpu_device pointer + * @crtc_id: crtc to cleanup pageflip on + * @crtc_base: new address of the crtc (GPU MC address) + * + * Does the actual pageflip (evergreen+). 
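+ * The new crtc_base is written to both the primary and secondary
+ * surface address registers while GRPH_UPDATE_LOCK is held, so the
+ * flip applies regardless of which surface is currently scanned out.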
+ * During vblank we take the crtc lock and wait for the update_pending + * bit to go high, when it does, we release the lock, and allow the + * double buffered update to take place. + * Returns the current update pending status. + */ +static void dce_v11_0_page_flip(struct amdgpu_device *adev, + int crtc_id, u64 crtc_base) +{ + struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; + u32 tmp = RREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset); + int i; + + /* Lock the graphics update lock */ + tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1); + WREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset, tmp); + + /* update the scanout addresses */ + WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, + upper_32_bits(crtc_base)); + WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, + lower_32_bits(crtc_base)); + + WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, + upper_32_bits(crtc_base)); + WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, + lower_32_bits(crtc_base)); + + /* Wait for update_pending to go high. */ + for (i = 0; i < adev->usec_timeout; i++) { + if (RREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset) & + GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING_MASK) + break; + udelay(1); + } + DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n"); + + /* Unlock the lock, so double-buffering can take place inside vblank */ + tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0); + WREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset, tmp); +} + +static int dce_v11_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, + u32 *vbl, u32 *position) +{ + if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc)) + return -EINVAL; + + *vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]); + *position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]); + + return 0; +} + +/** + * dce_v11_0_hpd_sense - hpd sense callback. + * + * @adev: amdgpu_device pointer + * @hpd: hpd (hotplug detect) pin + * + * Checks if a digital monitor is connected (evergreen+). + * Returns true if connected, false if not connected. + */ +static bool dce_v11_0_hpd_sense(struct amdgpu_device *adev, + enum amdgpu_hpd_id hpd) +{ + int idx; + bool connected = false; + + switch (hpd) { + case AMDGPU_HPD_1: + idx = 0; + break; + case AMDGPU_HPD_2: + idx = 1; + break; + case AMDGPU_HPD_3: + idx = 2; + break; + case AMDGPU_HPD_4: + idx = 3; + break; + case AMDGPU_HPD_5: + idx = 4; + break; + case AMDGPU_HPD_6: + idx = 5; + break; + default: + return connected; + } + + if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[idx]) & + DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK) + connected = true; + + return connected; +} + +/** + * dce_v11_0_hpd_set_polarity - hpd set polarity callback. + * + * @adev: amdgpu_device pointer + * @hpd: hpd (hotplug detect) pin + * + * Set the polarity of the hpd pin (evergreen+). 
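+ * As programmed below, a connected pin gets polarity 0 and a floating
+ * pin polarity 1, which effectively re-arms the interrupt for the
+ * opposite of the pin's current state on every hotplug transition.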
+ */ +static void dce_v11_0_hpd_set_polarity(struct amdgpu_device *adev, + enum amdgpu_hpd_id hpd) +{ + u32 tmp; + bool connected = dce_v11_0_hpd_sense(adev, hpd); + int idx; + + switch (hpd) { + case AMDGPU_HPD_1: + idx = 0; + break; + case AMDGPU_HPD_2: + idx = 1; + break; + case AMDGPU_HPD_3: + idx = 2; + break; + case AMDGPU_HPD_4: + idx = 3; + break; + case AMDGPU_HPD_5: + idx = 4; + break; + case AMDGPU_HPD_6: + idx = 5; + break; + default: + return; + } + + tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]); + if (connected) + tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0); + else + tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1); + WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp); +} + +/** + * dce_v11_0_hpd_init - hpd setup callback. + * + * @adev: amdgpu_device pointer + * + * Setup the hpd pins used by the card (evergreen+). + * Enable the pin, set the polarity, and enable the hpd interrupts. + */ +static void dce_v11_0_hpd_init(struct amdgpu_device *adev) +{ + struct drm_device *dev = adev->ddev; + struct drm_connector *connector; + u32 tmp; + int idx; + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); + + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || + connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { + /* don't try to enable hpd on eDP or LVDS avoid breaking the + * aux dp channel on imac and help (but not completely fix) + * https://bugzilla.redhat.com/show_bug.cgi?id=726143 + * also avoid interrupt storms during dpms. + */ + continue; + } + + switch (amdgpu_connector->hpd.hpd) { + case AMDGPU_HPD_1: + idx = 0; + break; + case AMDGPU_HPD_2: + idx = 1; + break; + case AMDGPU_HPD_3: + idx = 2; + break; + case AMDGPU_HPD_4: + idx = 3; + break; + case AMDGPU_HPD_5: + idx = 4; + break; + case AMDGPU_HPD_6: + idx = 5; + break; + default: + continue; + } + + tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]); + tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1); + WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp); + + tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx]); + tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL, + DC_HPD_CONNECT_INT_DELAY, + AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS); + tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL, + DC_HPD_DISCONNECT_INT_DELAY, + AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS); + WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx], tmp); + + dce_v11_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd); + amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); + } +} + +/** + * dce_v11_0_hpd_fini - hpd tear down callback. + * + * @adev: amdgpu_device pointer + * + * Tear down the hpd pins used by the card (evergreen+). + * Disable the hpd interrupts. 
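+ * This mirrors dce_v11_0_hpd_init(): DC_HPD_EN is cleared for each
+ * connector's pin and the hpd_irq reference taken at init time is
+ * dropped again via amdgpu_irq_put().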
+ */ +static void dce_v11_0_hpd_fini(struct amdgpu_device *adev) +{ + struct drm_device *dev = adev->ddev; + struct drm_connector *connector; + u32 tmp; + int idx; + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); + + switch (amdgpu_connector->hpd.hpd) { + case AMDGPU_HPD_1: + idx = 0; + break; + case AMDGPU_HPD_2: + idx = 1; + break; + case AMDGPU_HPD_3: + idx = 2; + break; + case AMDGPU_HPD_4: + idx = 3; + break; + case AMDGPU_HPD_5: + idx = 4; + break; + case AMDGPU_HPD_6: + idx = 5; + break; + default: + continue; + } + + tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]); + tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0); + WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp); + + amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); + } +} + +static u32 dce_v11_0_hpd_get_gpio_reg(struct amdgpu_device *adev) +{ + return mmDC_GPIO_HPD_A; +} + +static bool dce_v11_0_is_display_hung(struct amdgpu_device *adev) +{ + u32 crtc_hung = 0; + u32 crtc_status[6]; + u32 i, j, tmp; + + for (i = 0; i < adev->mode_info.num_crtc; i++) { + tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]); + if (REG_GET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN)) { + crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]); + crtc_hung |= (1 << i); + } + } + + for (j = 0; j < 10; j++) { + for (i = 0; i < adev->mode_info.num_crtc; i++) { + if (crtc_hung & (1 << i)) { + tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]); + if (tmp != crtc_status[i]) + crtc_hung &= ~(1 << i); + } + } + if (crtc_hung == 0) + return false; + udelay(100); + } + + return true; +} + +static void dce_v11_0_stop_mc_access(struct amdgpu_device *adev, + struct amdgpu_mode_mc_save *save) +{ + u32 crtc_enabled, tmp; + int i; + + save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL); + save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL); + + /* disable VGA render */ + tmp = RREG32(mmVGA_RENDER_CONTROL); + tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); + WREG32(mmVGA_RENDER_CONTROL, tmp); + + /* blank the display controllers */ + for (i = 0; i < adev->mode_info.num_crtc; i++) { + crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]), + CRTC_CONTROL, CRTC_MASTER_EN); + if (crtc_enabled) { +#if 0 + u32 frame_count; + int j; + + save->crtc_enabled[i] = true; + tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); + if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) { + amdgpu_display_vblank_wait(adev, i); + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); + tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1); + WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); + } + /* wait for the next frame */ + frame_count = amdgpu_display_vblank_get_counter(adev, i); + for (j = 0; j < adev->usec_timeout; j++) { + if (amdgpu_display_vblank_get_counter(adev, i) != frame_count) + break; + udelay(1); + } + tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); + if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK) == 0) { + tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1); + WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp); + } + tmp = RREG32(mmCRTC_MASTER_UPDATE_LOCK + crtc_offsets[i]); + if (REG_GET_FIELD(tmp, CRTC_MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK) == 0) { + tmp = REG_SET_FIELD(tmp, CRTC_MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 1); + WREG32(mmCRTC_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp); + } +#else + /* XXX this is 
a hack to avoid strange behavior with EFI on certain systems */ + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); + tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]); + tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0); + WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp); + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); + save->crtc_enabled[i] = false; + /* ***** */ +#endif + } else { + save->crtc_enabled[i] = false; + } + } +} + +static void dce_v11_0_resume_mc_access(struct amdgpu_device *adev, + struct amdgpu_mode_mc_save *save) +{ + u32 tmp, frame_count; + int i, j; + + /* update crtc base addresses */ + for (i = 0; i < adev->mode_info.num_crtc; i++) { + WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], + upper_32_bits(adev->mc.vram_start)); + WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], + upper_32_bits(adev->mc.vram_start)); + WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i], + (u32)adev->mc.vram_start); + WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i], + (u32)adev->mc.vram_start); + + if (save->crtc_enabled[i]) { + tmp = RREG32(mmCRTC_MASTER_UPDATE_MODE + crtc_offsets[i]); + if (REG_GET_FIELD(tmp, CRTC_MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 3) { + tmp = REG_SET_FIELD(tmp, CRTC_MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 3); + WREG32(mmCRTC_MASTER_UPDATE_MODE + crtc_offsets[i], tmp); + } + tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); + if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK)) { + tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0); + WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp); + } + tmp = RREG32(mmCRTC_MASTER_UPDATE_LOCK + crtc_offsets[i]); + if (REG_GET_FIELD(tmp, CRTC_MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK)) { + tmp = REG_SET_FIELD(tmp, CRTC_MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 0); + WREG32(mmCRTC_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp); + } + for (j = 0; j < adev->usec_timeout; j++) { + tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); + if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING) == 0) + break; + udelay(1); + } + tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); + tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0); + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); + WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); + /* wait for the next frame */ + frame_count = amdgpu_display_vblank_get_counter(adev, i); + for (j = 0; j < adev->usec_timeout; j++) { + if (amdgpu_display_vblank_get_counter(adev, i) != frame_count) + break; + udelay(1); + } + } + } + + WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start)); + WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start)); + + /* Unlock vga access */ + WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control); + mdelay(1); + WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control); +} + +static void dce_v11_0_set_vga_render_state(struct amdgpu_device *adev, + bool render) +{ + u32 tmp; + + /* Lockout access through VGA aperture*/ + tmp = RREG32(mmVGA_HDP_CONTROL); + if (render) + tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0); + else + tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1); + WREG32(mmVGA_HDP_CONTROL, tmp); + + /* disable VGA render */ + tmp = RREG32(mmVGA_RENDER_CONTROL); + if (render) + tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1); + else + tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); + WREG32(mmVGA_RENDER_CONTROL, tmp); +} + +static void 
dce_v11_0_program_fmt(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); + struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); + int bpc = 0; + u32 tmp = 0; + enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE; + + if (connector) { + struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); + bpc = amdgpu_connector_get_monitor_bpc(connector); + dither = amdgpu_connector->dither; + } + + /* LVDS/eDP FMT is set up by atom */ + if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT) + return; + + /* not needed for analog */ + if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) || + (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2)) + return; + + if (bpc == 0) + return; + + switch (bpc) { + case 6: + if (dither == AMDGPU_FMT_DITHER_ENABLE) { + /* XXX sort out optimal dither settings */ + tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1); + tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1); + tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1); + tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 0); + } else { + tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1); + tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 0); + } + break; + case 8: + if (dither == AMDGPU_FMT_DITHER_ENABLE) { + /* XXX sort out optimal dither settings */ + tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1); + tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1); + tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1); + tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1); + tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 1); + } else { + tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1); + tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 1); + } + break; + case 10: + if (dither == AMDGPU_FMT_DITHER_ENABLE) { + /* XXX sort out optimal dither settings */ + tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1); + tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1); + tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1); + tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1); + tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 2); + } else { + tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1); + tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 2); + } + break; + default: + /* not needed */ + break; + } + + WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp); +} + + +/* display watermark setup */ +/** + * dce_v11_0_line_buffer_adjust - Set up the line buffer + * + * @adev: amdgpu_device pointer + * @amdgpu_crtc: the selected display controller + * @mode: the current display mode on the selected display + * controller + * + * Setup up the line buffer allocation for + * the selected display controller (CIK). + * Returns the line buffer size in pixels. 
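+ * With the partitioning chosen below, modes narrower than 1920 pixels
+ * get a 1920*2 pixel allocation, modes narrower than 2560 get 2560*2,
+ * and anything wider gets the full 4096*2; a disabled controller
+ * returns 0 (no line buffer used).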
+ */ +static u32 dce_v11_0_line_buffer_adjust(struct amdgpu_device *adev, + struct amdgpu_crtc *amdgpu_crtc, + struct drm_display_mode *mode) +{ + u32 tmp, buffer_alloc, i, mem_cfg; + u32 pipe_offset = amdgpu_crtc->crtc_id; + /* + * Line Buffer Setup + * There are 6 line buffers, one for each display controllers. + * There are 3 partitions per LB. Select the number of partitions + * to enable based on the display width. For display widths larger + * than 4096, you need use to use 2 display controllers and combine + * them using the stereo blender. + */ + if (amdgpu_crtc->base.enabled && mode) { + if (mode->crtc_hdisplay < 1920) { + mem_cfg = 1; + buffer_alloc = 2; + } else if (mode->crtc_hdisplay < 2560) { + mem_cfg = 2; + buffer_alloc = 2; + } else if (mode->crtc_hdisplay < 4096) { + mem_cfg = 0; + buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4; + } else { + DRM_DEBUG_KMS("Mode too big for LB!\n"); + mem_cfg = 0; + buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4; + } + } else { + mem_cfg = 1; + buffer_alloc = 0; + } + + tmp = RREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset); + tmp = REG_SET_FIELD(tmp, LB_MEMORY_CTRL, LB_MEMORY_CONFIG, mem_cfg); + WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset, tmp); + + tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset); + tmp = REG_SET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATED, buffer_alloc); + WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset, tmp); + + for (i = 0; i < adev->usec_timeout; i++) { + tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset); + if (REG_GET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATION_COMPLETED)) + break; + udelay(1); + } + + if (amdgpu_crtc->base.enabled && mode) { + switch (mem_cfg) { + case 0: + default: + return 4096 * 2; + case 1: + return 1920 * 2; + case 2: + return 2560 * 2; + } + } + + /* controller not enabled, so no lb used */ + return 0; +} + +/** + * cik_get_number_of_dram_channels - get the number of dram channels + * + * @adev: amdgpu_device pointer + * + * Look up the number of video ram channels (CIK). + * Used for display watermark bandwidth calculations + * Returns the number of dram channels + */ +static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev) +{ + u32 tmp = RREG32(mmMC_SHARED_CHMAP); + + switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) { + case 0: + default: + return 1; + case 1: + return 2; + case 2: + return 4; + case 3: + return 8; + case 4: + return 3; + case 5: + return 6; + case 6: + return 10; + case 7: + return 12; + case 8: + return 16; + } +} + +struct dce10_wm_params { + u32 dram_channels; /* number of dram channels */ + u32 yclk; /* bandwidth per dram data pin in kHz */ + u32 sclk; /* engine clock in kHz */ + u32 disp_clk; /* display clock in kHz */ + u32 src_width; /* viewport width */ + u32 active_time; /* active display time in ns */ + u32 blank_time; /* blank time in ns */ + bool interlaced; /* mode is interlaced */ + fixed20_12 vsc; /* vertical scale ratio */ + u32 num_heads; /* number of active crtcs */ + u32 bytes_per_pixel; /* bytes per pixel display + overlay */ + u32 lb_size; /* line buffer allocated to pipe */ + u32 vtaps; /* vertical scaler taps */ +}; + +/** + * dce_v11_0_dram_bandwidth - get the dram bandwidth + * + * @wm: watermark calculation data + * + * Calculate the raw dram bandwidth (CIK). 
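+ * This works out to roughly yclk/1000 * (dram_channels * 4) * 0.7,
+ * the 0.7 being the assumed DRAM efficiency; e.g. a hypothetical
+ * yclk of 1000000 kHz on 4 channels gives 1000 * 16 * 0.7 = 11200
+ * MB/s.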
+ * Used for display watermark bandwidth calculations + * Returns the dram bandwidth in MBytes/s + */ +static u32 dce_v11_0_dram_bandwidth(struct dce10_wm_params *wm) +{ + /* Calculate raw DRAM Bandwidth */ + fixed20_12 dram_efficiency; /* 0.7 */ + fixed20_12 yclk, dram_channels, bandwidth; + fixed20_12 a; + + a.full = dfixed_const(1000); + yclk.full = dfixed_const(wm->yclk); + yclk.full = dfixed_div(yclk, a); + dram_channels.full = dfixed_const(wm->dram_channels * 4); + a.full = dfixed_const(10); + dram_efficiency.full = dfixed_const(7); + dram_efficiency.full = dfixed_div(dram_efficiency, a); + bandwidth.full = dfixed_mul(dram_channels, yclk); + bandwidth.full = dfixed_mul(bandwidth, dram_efficiency); + + return dfixed_trunc(bandwidth); +} + +/** + * dce_v11_0_dram_bandwidth_for_display - get the dram bandwidth for display + * + * @wm: watermark calculation data + * + * Calculate the dram bandwidth used for display (CIK). + * Used for display watermark bandwidth calculations + * Returns the dram bandwidth for display in MBytes/s + */ +static u32 dce_v11_0_dram_bandwidth_for_display(struct dce10_wm_params *wm) +{ + /* Calculate DRAM Bandwidth and the part allocated to display. */ + fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */ + fixed20_12 yclk, dram_channels, bandwidth; + fixed20_12 a; + + a.full = dfixed_const(1000); + yclk.full = dfixed_const(wm->yclk); + yclk.full = dfixed_div(yclk, a); + dram_channels.full = dfixed_const(wm->dram_channels * 4); + a.full = dfixed_const(10); + disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */ + disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a); + bandwidth.full = dfixed_mul(dram_channels, yclk); + bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation); + + return dfixed_trunc(bandwidth); +} + +/** + * dce_v11_0_data_return_bandwidth - get the data return bandwidth + * + * @wm: watermark calculation data + * + * Calculate the data return bandwidth used for display (CIK). + * Used for display watermark bandwidth calculations + * Returns the data return bandwidth in MBytes/s + */ +static u32 dce_v11_0_data_return_bandwidth(struct dce10_wm_params *wm) +{ + /* Calculate the display Data return Bandwidth */ + fixed20_12 return_efficiency; /* 0.8 */ + fixed20_12 sclk, bandwidth; + fixed20_12 a; + + a.full = dfixed_const(1000); + sclk.full = dfixed_const(wm->sclk); + sclk.full = dfixed_div(sclk, a); + a.full = dfixed_const(10); + return_efficiency.full = dfixed_const(8); + return_efficiency.full = dfixed_div(return_efficiency, a); + a.full = dfixed_const(32); + bandwidth.full = dfixed_mul(a, sclk); + bandwidth.full = dfixed_mul(bandwidth, return_efficiency); + + return dfixed_trunc(bandwidth); +} + +/** + * dce_v11_0_dmif_request_bandwidth - get the dmif bandwidth + * + * @wm: watermark calculation data + * + * Calculate the dmif bandwidth used for display (CIK). 
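+ * This works out to 32 * disp_clk/1000 * 0.8, mirroring the 32-byte,
+ * 0.8-efficiency estimate used for the data return path above.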
+ * Used for display watermark bandwidth calculations + * Returns the dmif bandwidth in MBytes/s + */ +static u32 dce_v11_0_dmif_request_bandwidth(struct dce10_wm_params *wm) +{ + /* Calculate the DMIF Request Bandwidth */ + fixed20_12 disp_clk_request_efficiency; /* 0.8 */ + fixed20_12 disp_clk, bandwidth; + fixed20_12 a, b; + + a.full = dfixed_const(1000); + disp_clk.full = dfixed_const(wm->disp_clk); + disp_clk.full = dfixed_div(disp_clk, a); + a.full = dfixed_const(32); + b.full = dfixed_mul(a, disp_clk); + + a.full = dfixed_const(10); + disp_clk_request_efficiency.full = dfixed_const(8); + disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a); + + bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency); + + return dfixed_trunc(bandwidth); +} + +/** + * dce_v11_0_available_bandwidth - get the min available bandwidth + * + * @wm: watermark calculation data + * + * Calculate the min available bandwidth used for display (CIK). + * Used for display watermark bandwidth calculations + * Returns the min available bandwidth in MBytes/s + */ +static u32 dce_v11_0_available_bandwidth(struct dce10_wm_params *wm) +{ + /* Calculate the Available bandwidth. Display can use this temporarily but not in average. */ + u32 dram_bandwidth = dce_v11_0_dram_bandwidth(wm); + u32 data_return_bandwidth = dce_v11_0_data_return_bandwidth(wm); + u32 dmif_req_bandwidth = dce_v11_0_dmif_request_bandwidth(wm); + + return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth)); +} + +/** + * dce_v11_0_average_bandwidth - get the average available bandwidth + * + * @wm: watermark calculation data + * + * Calculate the average available bandwidth used for display (CIK). + * Used for display watermark bandwidth calculations + * Returns the average available bandwidth in MBytes/s + */ +static u32 dce_v11_0_average_bandwidth(struct dce10_wm_params *wm) +{ + /* Calculate the display mode Average Bandwidth + * DisplayMode should contain the source and destination dimensions, + * timing, etc. + */ + fixed20_12 bpp; + fixed20_12 line_time; + fixed20_12 src_width; + fixed20_12 bandwidth; + fixed20_12 a; + + a.full = dfixed_const(1000); + line_time.full = dfixed_const(wm->active_time + wm->blank_time); + line_time.full = dfixed_div(line_time, a); + bpp.full = dfixed_const(wm->bytes_per_pixel); + src_width.full = dfixed_const(wm->src_width); + bandwidth.full = dfixed_mul(src_width, bpp); + bandwidth.full = dfixed_mul(bandwidth, wm->vsc); + bandwidth.full = dfixed_div(bandwidth, line_time); + + return dfixed_trunc(bandwidth); +} + +/** + * dce_v11_0_latency_watermark - get the latency watermark + * + * @wm: watermark calculation data + * + * Calculate the latency watermark (CIK). + * Used for display watermark bandwidth calculations + * Returns the latency watermark in ns + */ +static u32 dce_v11_0_latency_watermark(struct dce10_wm_params *wm) +{ + /* First calculate the latency in ns */ + u32 mc_latency = 2000; /* 2000 ns. 
*/ + u32 available_bandwidth = dce_v11_0_available_bandwidth(wm); + u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth; + u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth; + u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */ + u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) + + (wm->num_heads * cursor_line_pair_return_time); + u32 latency = mc_latency + other_heads_data_return_time + dc_latency; + u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time; + u32 tmp, dmif_size = 12288; + fixed20_12 a, b, c; + + if (wm->num_heads == 0) + return 0; + + a.full = dfixed_const(2); + b.full = dfixed_const(1); + if ((wm->vsc.full > a.full) || + ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) || + (wm->vtaps >= 5) || + ((wm->vsc.full >= a.full) && wm->interlaced)) + max_src_lines_per_dst_line = 4; + else + max_src_lines_per_dst_line = 2; + + a.full = dfixed_const(available_bandwidth); + b.full = dfixed_const(wm->num_heads); + a.full = dfixed_div(a, b); + + b.full = dfixed_const(mc_latency + 512); + c.full = dfixed_const(wm->disp_clk); + b.full = dfixed_div(b, c); + + c.full = dfixed_const(dmif_size); + b.full = dfixed_div(c, b); + + tmp = min(dfixed_trunc(a), dfixed_trunc(b)); + + b.full = dfixed_const(1000); + c.full = dfixed_const(wm->disp_clk); + b.full = dfixed_div(c, b); + c.full = dfixed_const(wm->bytes_per_pixel); + b.full = dfixed_mul(b, c); + + lb_fill_bw = min(tmp, dfixed_trunc(b)); + + a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel); + b.full = dfixed_const(1000); + c.full = dfixed_const(lb_fill_bw); + b.full = dfixed_div(c, b); + a.full = dfixed_div(a, b); + line_fill_time = dfixed_trunc(a); + + if (line_fill_time < wm->active_time) + return latency; + else + return latency + (line_fill_time - wm->active_time); + +} + +/** + * dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display - check + * average and available dram bandwidth + * + * @wm: watermark calculation data + * + * Check if the display average bandwidth fits in the display + * dram bandwidth (CIK). + * Used for display watermark bandwidth calculations + * Returns true if the display fits, false if not. + */ +static bool dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce10_wm_params *wm) +{ + if (dce_v11_0_average_bandwidth(wm) <= + (dce_v11_0_dram_bandwidth_for_display(wm) / wm->num_heads)) + return true; + else + return false; +} + +/** + * dce_v11_0_average_bandwidth_vs_available_bandwidth - check + * average and available bandwidth + * + * @wm: watermark calculation data + * + * Check if the display average bandwidth fits in the display + * available bandwidth (CIK). + * Used for display watermark bandwidth calculations + * Returns true if the display fits, false if not. + */ +static bool dce_v11_0_average_bandwidth_vs_available_bandwidth(struct dce10_wm_params *wm) +{ + if (dce_v11_0_average_bandwidth(wm) <= + (dce_v11_0_available_bandwidth(wm) / wm->num_heads)) + return true; + else + return false; +} + +/** + * dce_v11_0_check_latency_hiding - check latency hiding + * + * @wm: watermark calculation data + * + * Check latency hiding (CIK). + * Used for display watermark bandwidth calculations + * Returns true if the display fits, false if not. 
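+ * The latency watermark is compared against the time the line buffer
+ * can hide: one or two latency-tolerant lines (depending on the
+ * vertical scale factor and on lb_size/src_width vs. the number of
+ * taps) times the line time, plus the blank time.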
+ */ +static bool dce_v11_0_check_latency_hiding(struct dce10_wm_params *wm) +{ + u32 lb_partitions = wm->lb_size / wm->src_width; + u32 line_time = wm->active_time + wm->blank_time; + u32 latency_tolerant_lines; + u32 latency_hiding; + fixed20_12 a; + + a.full = dfixed_const(1); + if (wm->vsc.full > a.full) + latency_tolerant_lines = 1; + else { + if (lb_partitions <= (wm->vtaps + 1)) + latency_tolerant_lines = 1; + else + latency_tolerant_lines = 2; + } + + latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time); + + if (dce_v11_0_latency_watermark(wm) <= latency_hiding) + return true; + else + return false; +} + +/** + * dce_v11_0_program_watermarks - program display watermarks + * + * @adev: amdgpu_device pointer + * @amdgpu_crtc: the selected display controller + * @lb_size: line buffer size + * @num_heads: number of display controllers in use + * + * Calculate and program the display watermarks for the + * selected display controller (CIK). + */ +static void dce_v11_0_program_watermarks(struct amdgpu_device *adev, + struct amdgpu_crtc *amdgpu_crtc, + u32 lb_size, u32 num_heads) +{ + struct drm_display_mode *mode = &amdgpu_crtc->base.mode; + struct dce10_wm_params wm_low, wm_high; + u32 pixel_period; + u32 line_time = 0; + u32 latency_watermark_a = 0, latency_watermark_b = 0; + u32 tmp, wm_mask; + + if (amdgpu_crtc->base.enabled && num_heads && mode) { + pixel_period = 1000000 / (u32)mode->clock; + line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535); + + /* watermark for high clocks */ + if (adev->pm.dpm_enabled) { + wm_high.yclk = + amdgpu_dpm_get_mclk(adev, false) * 10; + wm_high.sclk = + amdgpu_dpm_get_sclk(adev, false) * 10; + } else { + wm_high.yclk = adev->pm.current_mclk * 10; + wm_high.sclk = adev->pm.current_sclk * 10; + } + + wm_high.disp_clk = mode->clock; + wm_high.src_width = mode->crtc_hdisplay; + wm_high.active_time = mode->crtc_hdisplay * pixel_period; + wm_high.blank_time = line_time - wm_high.active_time; + wm_high.interlaced = false; + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + wm_high.interlaced = true; + wm_high.vsc = amdgpu_crtc->vsc; + wm_high.vtaps = 1; + if (amdgpu_crtc->rmx_type != RMX_OFF) + wm_high.vtaps = 2; + wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */ + wm_high.lb_size = lb_size; + wm_high.dram_channels = cik_get_number_of_dram_channels(adev); + wm_high.num_heads = num_heads; + + /* set for high clocks */ + latency_watermark_a = min(dce_v11_0_latency_watermark(&wm_high), (u32)65535); + + /* possibly force display priority to high */ + /* should really do this at mode validation time... 
*/ + if (!dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) || + !dce_v11_0_average_bandwidth_vs_available_bandwidth(&wm_high) || + !dce_v11_0_check_latency_hiding(&wm_high) || + (adev->mode_info.disp_priority == 2)) { + DRM_DEBUG_KMS("force priority to high\n"); + } + + /* watermark for low clocks */ + if (adev->pm.dpm_enabled) { + wm_low.yclk = + amdgpu_dpm_get_mclk(adev, true) * 10; + wm_low.sclk = + amdgpu_dpm_get_sclk(adev, true) * 10; + } else { + wm_low.yclk = adev->pm.current_mclk * 10; + wm_low.sclk = adev->pm.current_sclk * 10; + } + + wm_low.disp_clk = mode->clock; + wm_low.src_width = mode->crtc_hdisplay; + wm_low.active_time = mode->crtc_hdisplay * pixel_period; + wm_low.blank_time = line_time - wm_low.active_time; + wm_low.interlaced = false; + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + wm_low.interlaced = true; + wm_low.vsc = amdgpu_crtc->vsc; + wm_low.vtaps = 1; + if (amdgpu_crtc->rmx_type != RMX_OFF) + wm_low.vtaps = 2; + wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */ + wm_low.lb_size = lb_size; + wm_low.dram_channels = cik_get_number_of_dram_channels(adev); + wm_low.num_heads = num_heads; + + /* set for low clocks */ + latency_watermark_b = min(dce_v11_0_latency_watermark(&wm_low), (u32)65535); + + /* possibly force display priority to high */ + /* should really do this at mode validation time... */ + if (!dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) || + !dce_v11_0_average_bandwidth_vs_available_bandwidth(&wm_low) || + !dce_v11_0_check_latency_hiding(&wm_low) || + (adev->mode_info.disp_priority == 2)) { + DRM_DEBUG_KMS("force priority to high\n"); + } + } + + /* select wm A */ + wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset); + tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 1); + WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp); + tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset); + tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a); + tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time); + WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp); + /* select wm B */ + tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 2); + WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp); + tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset); + tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a); + tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time); + WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp); + /* restore original selection */ + WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask); + + /* save values for DPM */ + amdgpu_crtc->line_time = line_time; + amdgpu_crtc->wm_high = latency_watermark_a; + amdgpu_crtc->wm_low = latency_watermark_b; +} + +/** + * dce_v11_0_bandwidth_update - program display watermarks + * + * @adev: amdgpu_device pointer + * + * Calculate and program the display watermarks and line + * buffer allocation (CIK). 
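+ * All controllers are walked twice: a first pass counts the enabled
+ * heads, then a second pass sizes each line buffer and programs
+ * watermarks A and B using that head count.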
+ */ +static void dce_v11_0_bandwidth_update(struct amdgpu_device *adev) +{ + struct drm_display_mode *mode = NULL; + u32 num_heads = 0, lb_size; + int i; + + amdgpu_update_display_priority(adev); + + for (i = 0; i < adev->mode_info.num_crtc; i++) { + if (adev->mode_info.crtcs[i]->base.enabled) + num_heads++; + } + for (i = 0; i < adev->mode_info.num_crtc; i++) { + mode = &adev->mode_info.crtcs[i]->base.mode; + lb_size = dce_v11_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode); + dce_v11_0_program_watermarks(adev, adev->mode_info.crtcs[i], + lb_size, num_heads); + } +} + +static void dce_v11_0_audio_get_connected_pins(struct amdgpu_device *adev) +{ + int i; + u32 offset, tmp; + + for (i = 0; i < adev->mode_info.audio.num_pins; i++) { + offset = adev->mode_info.audio.pin[i].offset; + tmp = RREG32_AUDIO_ENDPT(offset, + ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT); + if (((tmp & + AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >> + AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1) + adev->mode_info.audio.pin[i].connected = false; + else + adev->mode_info.audio.pin[i].connected = true; + } +} + +static struct amdgpu_audio_pin *dce_v11_0_audio_get_pin(struct amdgpu_device *adev) +{ + int i; + + dce_v11_0_audio_get_connected_pins(adev); + + for (i = 0; i < adev->mode_info.audio.num_pins; i++) { + if (adev->mode_info.audio.pin[i].connected) + return &adev->mode_info.audio.pin[i]; + } + DRM_ERROR("No connected audio pins found!\n"); + return NULL; +} + +static void dce_v11_0_afmt_audio_select_pin(struct drm_encoder *encoder) +{ + struct amdgpu_device *adev = encoder->dev->dev_private; + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; + u32 tmp; + + if (!dig || !dig->afmt || !dig->afmt->pin) + return; + + tmp = RREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset); + tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, dig->afmt->pin->id); + WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset, tmp); +} + +static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder, + struct drm_display_mode *mode) +{ + struct amdgpu_device *adev = encoder->dev->dev_private; + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; + struct drm_connector *connector; + struct amdgpu_connector *amdgpu_connector = NULL; + u32 tmp; + int interlace = 0; + + if (!dig || !dig->afmt || !dig->afmt->pin) + return; + + list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + if (connector->encoder == encoder) { + amdgpu_connector = to_amdgpu_connector(connector); + break; + } + } + + if (!amdgpu_connector) { + DRM_ERROR("Couldn't find encoder's connector\n"); + return; + } + + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + interlace = 1; + if (connector->latency_present[interlace]) { + tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, + VIDEO_LIPSYNC, connector->video_latency[interlace]); + tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, + AUDIO_LIPSYNC, connector->audio_latency[interlace]); + } else { + tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, + VIDEO_LIPSYNC, 0); + tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, + AUDIO_LIPSYNC, 0); + } + WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, + 
ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp); +} + +static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder) +{ + struct amdgpu_device *adev = encoder->dev->dev_private; + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; + struct drm_connector *connector; + struct amdgpu_connector *amdgpu_connector = NULL; + u32 tmp; + u8 *sadb = NULL; + int sad_count; + + if (!dig || !dig->afmt || !dig->afmt->pin) + return; + + list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + if (connector->encoder == encoder) { + amdgpu_connector = to_amdgpu_connector(connector); + break; + } + } + + if (!amdgpu_connector) { + DRM_ERROR("Couldn't find encoder's connector\n"); + return; + } + + sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb); + if (sad_count < 0) { + DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); + sad_count = 0; + } + + /* program the speaker allocation */ + tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset, + ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER); + tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, + DP_CONNECTION, 0); + /* set HDMI mode */ + tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, + HDMI_CONNECTION, 1); + if (sad_count) + tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, + SPEAKER_ALLOCATION, sadb[0]); + else + tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, + SPEAKER_ALLOCATION, 5); /* stereo */ + WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, + ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp); + + kfree(sadb); +} + +static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder) +{ + struct amdgpu_device *adev = encoder->dev->dev_private; + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; + struct drm_connector *connector; + struct amdgpu_connector *amdgpu_connector = NULL; + struct cea_sad *sads; + int i, sad_count; + + static const u16 eld_reg_to_type[][2] = { + { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM }, + { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 }, + { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 }, + { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 }, + { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 }, + { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC }, + { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS }, + { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC }, + { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 }, + { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD }, + { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP }, + { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO }, + }; + + if (!dig || !dig->afmt || !dig->afmt->pin) + return; + + list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + if (connector->encoder == encoder) { + amdgpu_connector = to_amdgpu_connector(connector); + break; + } + } + + if (!amdgpu_connector) { + DRM_ERROR("Couldn't find encoder's connector\n"); + return; 
+ } + + sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads); + if (sad_count <= 0) { + DRM_ERROR("Couldn't read SADs: %d\n", sad_count); + return; + } + BUG_ON(!sads); + + for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) { + u32 tmp = 0; + u8 stereo_freqs = 0; + int max_channels = -1; + int j; + + for (j = 0; j < sad_count; j++) { + struct cea_sad *sad = &sads[j]; + + if (sad->format == eld_reg_to_type[i][1]) { + if (sad->channels > max_channels) { + tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, + MAX_CHANNELS, sad->channels); + tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, + DESCRIPTOR_BYTE_2, sad->byte2); + tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, + SUPPORTED_FREQUENCIES, sad->freq); + max_channels = sad->channels; + } + + if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM) + stereo_freqs |= sad->freq; + else + break; + } + } + + tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, + SUPPORTED_FREQUENCIES_STEREO, stereo_freqs); + WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp); + } + + kfree(sads); +} + +static void dce_v11_0_audio_enable(struct amdgpu_device *adev, + struct amdgpu_audio_pin *pin, + bool enable) +{ + if (!pin) + return; + + WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, + enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0); +} + +static const u32 pin_offsets[] = +{ + AUD0_REGISTER_OFFSET, + AUD1_REGISTER_OFFSET, + AUD2_REGISTER_OFFSET, + AUD3_REGISTER_OFFSET, + AUD4_REGISTER_OFFSET, + AUD5_REGISTER_OFFSET, + AUD6_REGISTER_OFFSET, +}; + +static int dce_v11_0_audio_init(struct amdgpu_device *adev) +{ + int i; + + if (!amdgpu_audio) + return 0; + + adev->mode_info.audio.enabled = true; + + adev->mode_info.audio.num_pins = 7; + + for (i = 0; i < adev->mode_info.audio.num_pins; i++) { + adev->mode_info.audio.pin[i].channels = -1; + adev->mode_info.audio.pin[i].rate = -1; + adev->mode_info.audio.pin[i].bits_per_sample = -1; + adev->mode_info.audio.pin[i].status_bits = 0; + adev->mode_info.audio.pin[i].category_code = 0; + adev->mode_info.audio.pin[i].connected = false; + adev->mode_info.audio.pin[i].offset = pin_offsets[i]; + adev->mode_info.audio.pin[i].id = i; + /* disable audio. 
it will be set up later */ + /* XXX remove once we switch to ip funcs */ + dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); + } + + return 0; +} + +static void dce_v11_0_audio_fini(struct amdgpu_device *adev) +{ + int i; + + if (!adev->mode_info.audio.enabled) + return; + + for (i = 0; i < adev->mode_info.audio.num_pins; i++) + dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); + + adev->mode_info.audio.enabled = false; +} + +/* + * update the N and CTS parameters for a given pixel clock rate + */ +static void dce_v11_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock) +{ + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock); + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; + u32 tmp; + + tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset); + tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz); + WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp); + tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset); + tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz); + WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp); + + tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset); + tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz); + WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp); + tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset); + tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz); + WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp); + + tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset); + tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz); + WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp); + tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset); + tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz); + WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp); + +} + +/* + * build a HDMI Video Info Frame + */ +static void dce_v11_0_afmt_update_avi_infoframe(struct drm_encoder *encoder, + void *buffer, size_t size) +{ + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; + uint8_t *frame = buffer + 3; + uint8_t *header = buffer; + + WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset, + frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24)); + WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset, + frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24)); + WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset, + frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24)); + WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset, + frame[0xC] | (frame[0xD] << 8) | (header[1] << 24)); +} + +static void dce_v11_0_audio_set_dto(struct drm_encoder *encoder, u32 clock) +{ + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); + u32 dto_phase = 24 * 1000; + u32 dto_modulo = clock; + u32 tmp; + + if (!dig || !dig->afmt) + return; + + /* XXX two dtos; generally use dto0 for hdmi */ + /* Express [24MHz / target pixel clock] as an exact rational + * number (coefficient of two integer 
numbers. DCCG_AUDIO_DTOx_PHASE + * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator + */ + tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE); + tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL, + amdgpu_crtc->crtc_id); + WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp); + WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase); + WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo); +} + +/* + * update the info frames with the data from the current display mode + */ +static void dce_v11_0_afmt_setmode(struct drm_encoder *encoder, + struct drm_display_mode *mode) +{ + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; + struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); + u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE]; + struct hdmi_avi_infoframe frame; + ssize_t err; + u32 tmp; + int bpc = 8; + + if (!dig || !dig->afmt) + return; + + /* Silent, r600_hdmi_enable will raise WARN for us */ + if (!dig->afmt->enabled) + return; + + /* hdmi deep color mode general control packets setup, if bpc > 8 */ + if (encoder->crtc) { + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); + bpc = amdgpu_crtc->bpc; + } + + /* disable audio prior to setting up hw */ + dig->afmt->pin = dce_v11_0_audio_get_pin(adev); + dce_v11_0_audio_enable(adev, dig->afmt->pin, false); + + dce_v11_0_audio_set_dto(encoder, mode->clock); + + tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset); + tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1); + WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp); /* send null packets when required */ + + WREG32(mmAFMT_AUDIO_CRC_CONTROL + dig->afmt->offset, 0x1000); + + tmp = RREG32(mmHDMI_CONTROL + dig->afmt->offset); + switch (bpc) { + case 0: + case 6: + case 8: + case 16: + default: + tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 0); + tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0); + DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n", + connector->name, bpc); + break; + case 10: + tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1); + tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 1); + DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n", + connector->name); + break; + case 12: + tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1); + tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 2); + DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n", + connector->name); + break; + } + WREG32(mmHDMI_CONTROL + dig->afmt->offset, tmp); + + tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset); + tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1); /* send null packets when required */ + tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1); /* send general control packets */ + tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1); /* send general control packets every frame */ + WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp); + + tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset); + /* enable audio info frames (frames won't be set until audio is enabled) */ + tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1); + /* required for audio info values to be updated */ + tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1); + 
WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp); + + tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset); + /* required for audio info values to be updated */ + tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1); + WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp); + + tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset); + /* anything other than 0 */ + tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, 2); + WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp); + + WREG32(mmHDMI_GC + dig->afmt->offset, 0); /* unset HDMI_GC_AVMUTE */ + + tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset); + /* set the default audio delay */ + tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1); + /* should be sufficient for all audio modes and small enough for all hblanks */ + tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3); + WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp); + + tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset); + /* allow 60958 channel status fields to be updated */ + tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1); + WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp); + + tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset); + if (bpc > 8) + /* clear SW CTS value */ + tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 0); + else + /* select SW CTS value */ + tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 1); + /* allow hw to send ACR packets when required */ + tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1); + WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp); + + dce_v11_0_afmt_update_ACR(encoder, mode->clock); + + tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset); + tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1); + WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp); + + tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset); + tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2); + WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp); + + tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset); + tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3); + tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4); + tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5); + tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6); + tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7); + tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8); + WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp); + + dce_v11_0_audio_write_speaker_allocation(encoder); + + WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset, + (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT)); + + dce_v11_0_afmt_audio_select_pin(encoder); + dce_v11_0_audio_write_sad_regs(encoder); + dce_v11_0_audio_write_latency_fields(encoder, mode); + + err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); + if (err < 0) { + DRM_ERROR("failed to setup AVI infoframe: %zd\n", err); + return; + } + + err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer)); + if (err < 0) { + DRM_ERROR("failed to pack AVI infoframe: %zd\n", err); + return; + } + + dce_v11_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer)); + + tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset); + 
/* enable AVI info frames */ + tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1); + /* required for audio info values to be updated */ + tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1); + WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp); + + tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset); + tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2); + WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp); + + tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset); + /* send audio packets */ + tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1); + WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp); + + WREG32(mmAFMT_RAMP_CONTROL0 + dig->afmt->offset, 0x00FFFFFF); + WREG32(mmAFMT_RAMP_CONTROL1 + dig->afmt->offset, 0x007FFFFF); + WREG32(mmAFMT_RAMP_CONTROL2 + dig->afmt->offset, 0x00000001); + WREG32(mmAFMT_RAMP_CONTROL3 + dig->afmt->offset, 0x00000001); + + /* enable audio after setting up hw */ + dce_v11_0_audio_enable(adev, dig->afmt->pin, true); +} + +static void dce_v11_0_afmt_enable(struct drm_encoder *encoder, bool enable) +{ + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; + + if (!dig || !dig->afmt) + return; + + /* Silent, r600_hdmi_enable will raise WARN for us */ + if (enable && dig->afmt->enabled) + return; + if (!enable && !dig->afmt->enabled) + return; + + if (!enable && dig->afmt->pin) { + dce_v11_0_audio_enable(adev, dig->afmt->pin, false); + dig->afmt->pin = NULL; + } + + dig->afmt->enabled = enable; + + DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n", + enable ? 
"En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id); +} + +static void dce_v11_0_afmt_init(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < adev->mode_info.num_dig; i++) + adev->mode_info.afmt[i] = NULL; + + /* DCE11 has audio blocks tied to DIG encoders */ + for (i = 0; i < adev->mode_info.num_dig; i++) { + adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL); + if (adev->mode_info.afmt[i]) { + adev->mode_info.afmt[i]->offset = dig_offsets[i]; + adev->mode_info.afmt[i]->id = i; + } + } +} + +static void dce_v11_0_afmt_fini(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < adev->mode_info.num_dig; i++) { + kfree(adev->mode_info.afmt[i]); + adev->mode_info.afmt[i] = NULL; + } +} + +static const u32 vga_control_regs[6] = +{ + mmD1VGA_CONTROL, + mmD2VGA_CONTROL, + mmD3VGA_CONTROL, + mmD4VGA_CONTROL, + mmD5VGA_CONTROL, + mmD6VGA_CONTROL, +}; + +static void dce_v11_0_vga_enable(struct drm_crtc *crtc, bool enable) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct amdgpu_device *adev = dev->dev_private; + u32 vga_control; + + vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1; + if (enable) + WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1); + else + WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control); +} + +static void dce_v11_0_grph_enable(struct drm_crtc *crtc, bool enable) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct amdgpu_device *adev = dev->dev_private; + + if (enable) + WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1); + else + WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0); +} + +static void dce_v11_0_tiling_fields(uint64_t tiling_flags, unsigned *bankw, + unsigned *bankh, unsigned *mtaspect, + unsigned *tile_split) +{ + *bankw = (tiling_flags >> AMDGPU_TILING_EG_BANKW_SHIFT) & AMDGPU_TILING_EG_BANKW_MASK; + *bankh = (tiling_flags >> AMDGPU_TILING_EG_BANKH_SHIFT) & AMDGPU_TILING_EG_BANKH_MASK; + *mtaspect = (tiling_flags >> AMDGPU_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & AMDGPU_TILING_EG_MACRO_TILE_ASPECT_MASK; + *tile_split = (tiling_flags >> AMDGPU_TILING_EG_TILE_SPLIT_SHIFT) & AMDGPU_TILING_EG_TILE_SPLIT_MASK; + switch (*bankw) { + default: + case 1: + *bankw = ADDR_SURF_BANK_WIDTH_1; + break; + case 2: + *bankw = ADDR_SURF_BANK_WIDTH_2; + break; + case 4: + *bankw = ADDR_SURF_BANK_WIDTH_4; + break; + case 8: + *bankw = ADDR_SURF_BANK_WIDTH_8; + break; + } + switch (*bankh) { + default: + case 1: + *bankh = ADDR_SURF_BANK_HEIGHT_1; + break; + case 2: + *bankh = ADDR_SURF_BANK_HEIGHT_2; + break; + case 4: + *bankh = ADDR_SURF_BANK_HEIGHT_4; + break; + case 8: + *bankh = ADDR_SURF_BANK_HEIGHT_8; + break; + } + switch (*mtaspect) { + default: + case 1: + *mtaspect = ADDR_SURF_MACRO_ASPECT_1; + break; + case 2: + *mtaspect = ADDR_SURF_MACRO_ASPECT_2; + break; + case 4: + *mtaspect = ADDR_SURF_MACRO_ASPECT_4; + break; + case 8: + *mtaspect = ADDR_SURF_MACRO_ASPECT_8; + break; + } +} + +static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int x, int y, int atomic) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_framebuffer *amdgpu_fb; + struct drm_framebuffer *target_fb; + struct drm_gem_object *obj; + struct amdgpu_bo *rbo; + uint64_t fb_location, tiling_flags; + uint32_t fb_format, fb_pitch_pixels; + unsigned bankw, bankh, 
mtaspect, tile_split; + u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE); + /* XXX change to VI */ + u32 pipe_config = (adev->gfx.config.tile_mode_array[10] >> 6) & 0x1f; + u32 tmp, viewport_w, viewport_h; + int r; + bool bypass_lut = false; + + /* no fb bound */ + if (!atomic && !crtc->primary->fb) { + DRM_DEBUG_KMS("No FB bound\n"); + return 0; + } + + if (atomic) { + amdgpu_fb = to_amdgpu_framebuffer(fb); + target_fb = fb; + } + else { + amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); + target_fb = crtc->primary->fb; + } + + /* If atomic, assume fb object is pinned & idle & fenced and + * just update base pointers + */ + obj = amdgpu_fb->obj; + rbo = gem_to_amdgpu_bo(obj); + r = amdgpu_bo_reserve(rbo, false); + if (unlikely(r != 0)) + return r; + + if (atomic) + fb_location = amdgpu_bo_gpu_offset(rbo); + else { + r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location); + if (unlikely(r != 0)) { + amdgpu_bo_unreserve(rbo); + return -EINVAL; + } + } + + amdgpu_bo_get_tiling_flags(rbo, &tiling_flags); + amdgpu_bo_unreserve(rbo); + + switch (target_fb->pixel_format) { + case DRM_FORMAT_C8: + fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 0); + fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0); + break; + case DRM_FORMAT_XRGB4444: + case DRM_FORMAT_ARGB4444: + fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1); + fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 2); +#ifdef __BIG_ENDIAN + fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, + ENDIAN_8IN16); +#endif + break; + case DRM_FORMAT_XRGB1555: + case DRM_FORMAT_ARGB1555: + fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1); + fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0); +#ifdef __BIG_ENDIAN + fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, + ENDIAN_8IN16); +#endif + break; + case DRM_FORMAT_BGRX5551: + case DRM_FORMAT_BGRA5551: + fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1); + fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 5); +#ifdef __BIG_ENDIAN + fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, + ENDIAN_8IN16); +#endif + break; + case DRM_FORMAT_RGB565: + fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1); + fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1); +#ifdef __BIG_ENDIAN + fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, + ENDIAN_8IN16); +#endif + break; + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_ARGB8888: + fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2); + fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0); +#ifdef __BIG_ENDIAN + fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, + ENDIAN_8IN32); +#endif + break; + case DRM_FORMAT_XRGB2101010: + case DRM_FORMAT_ARGB2101010: + fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2); + fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1); +#ifdef __BIG_ENDIAN + fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, + ENDIAN_8IN32); +#endif + /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */ + bypass_lut = true; + break; + case DRM_FORMAT_BGRX1010102: + case DRM_FORMAT_BGRA1010102: + fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2); + fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 4); +#ifdef __BIG_ENDIAN + fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, + ENDIAN_8IN32); +#endif + /* Greater 8 bpc fb needs 
to bypass hw-lut to retain precision */ + bypass_lut = true; + break; + default: + DRM_ERROR("Unsupported screen format %s\n", + drm_get_format_name(target_fb->pixel_format)); + return -EINVAL; + } + + if (tiling_flags & AMDGPU_TILING_MACRO) { + unsigned tileb, index, num_banks, tile_split_bytes; + + dce_v11_0_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split); + /* Set NUM_BANKS. */ + /* Calculate the macrotile mode index. */ + tile_split_bytes = 64 << tile_split; + tileb = 8 * 8 * target_fb->bits_per_pixel / 8; + tileb = min(tile_split_bytes, tileb); + + for (index = 0; tileb > 64; index++) { + tileb >>= 1; + } + + if (index >= 16) { + DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n", + target_fb->bits_per_pixel, tile_split); + return -EINVAL; + } + + /* XXX fix me for VI */ + num_banks = (adev->gfx.config.macrotile_mode_array[index] >> 6) & 0x3; + fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_NUM_BANKS, num_banks); + fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE, + ARRAY_2D_TILED_THIN1); + fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_TILE_SPLIT, + tile_split); + fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_WIDTH, bankw); + fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_HEIGHT, bankh); + fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MACRO_TILE_ASPECT, + mtaspect); + fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MICRO_TILE_MODE, + ADDR_SURF_MICRO_TILING_DISPLAY); + } else if (tiling_flags & AMDGPU_TILING_MICRO) { + fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE, + ARRAY_1D_TILED_THIN1); + } + + /* Read the pipe config from the 2D TILED SCANOUT mode. + * It should be the same for the other modes too, but not all + * modes set the pipe config field. */ + fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_PIPE_CONFIG, + pipe_config); + + dce_v11_0_vga_enable(crtc, false); + + WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, + upper_32_bits(fb_location)); + WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, + upper_32_bits(fb_location)); + WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, + (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK); + WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, + (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK); + WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format); + WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap); + + /* + * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT + * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to + * retain the full precision throughout the pipeline. 
+ */ + tmp = RREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset); + if (bypass_lut) + tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 1); + else + tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 0); + WREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset, tmp); + + if (bypass_lut) + DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n"); + + WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0); + WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0); + WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0); + WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0); + WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width); + WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height); + + fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8); + WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels); + + dce_v11_0_grph_enable(crtc, true); + + WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset, + target_fb->height); + + x &= ~3; + y &= ~1; + WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset, + (x << 16) | y); + viewport_w = crtc->mode.hdisplay; + viewport_h = (crtc->mode.vdisplay + 1) & ~1; + WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset, + (viewport_w << 16) | viewport_h); + + /* pageflip setup */ + /* make sure flip is at vb rather than hb */ + tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset); + tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL, + GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0); + WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp); + + /* set pageflip to happen only at start of vblank interval (front porch) */ + WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3); + + if (!atomic && fb && fb != crtc->primary->fb) { + amdgpu_fb = to_amdgpu_framebuffer(fb); + rbo = gem_to_amdgpu_bo(amdgpu_fb->obj); + r = amdgpu_bo_reserve(rbo, false); + if (unlikely(r != 0)) + return r; + amdgpu_bo_unpin(rbo); + amdgpu_bo_unreserve(rbo); + } + + /* Bytes per pixel may have changed */ + dce_v11_0_bandwidth_update(adev); + + return 0; +} + +static void dce_v11_0_set_interleave(struct drm_crtc *crtc, + struct drm_display_mode *mode) +{ + struct drm_device *dev = crtc->dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + u32 tmp; + + tmp = RREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset); + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 1); + else + tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 0); + WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, tmp); +} + +static void dce_v11_0_crtc_load_lut(struct drm_crtc *crtc) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct amdgpu_device *adev = dev->dev_private; + int i; + u32 tmp; + + DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id); + + tmp = RREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset); + tmp = REG_SET_FIELD(tmp, INPUT_CSC_CONTROL, INPUT_CSC_GRPH_MODE, 0); + WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp); + + tmp = RREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset); + tmp = REG_SET_FIELD(tmp, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_BYPASS, 1); + WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset, tmp); + + tmp = RREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset); + tmp = REG_SET_FIELD(tmp, INPUT_GAMMA_CONTROL, GRPH_INPUT_GAMMA_MODE, 0); + WREG32(mmINPUT_GAMMA_CONTROL + 
amdgpu_crtc->crtc_offset, tmp); + + WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0); + + WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0); + WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0); + WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0); + + WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff); + WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff); + WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff); + + WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0); + WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007); + + WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0); + for (i = 0; i < 256; i++) { + WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset, + (amdgpu_crtc->lut_r[i] << 20) | + (amdgpu_crtc->lut_g[i] << 10) | + (amdgpu_crtc->lut_b[i] << 0)); + } + + tmp = RREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset); + tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, GRPH_DEGAMMA_MODE, 0); + tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR_DEGAMMA_MODE, 0); + tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR2_DEGAMMA_MODE, 0); + WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp); + + tmp = RREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset); + tmp = REG_SET_FIELD(tmp, GAMUT_REMAP_CONTROL, GRPH_GAMUT_REMAP_MODE, 0); + WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset, tmp); + + tmp = RREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset); + tmp = REG_SET_FIELD(tmp, REGAMMA_CONTROL, GRPH_REGAMMA_MODE, 0); + WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp); + + tmp = RREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset); + tmp = REG_SET_FIELD(tmp, OUTPUT_CSC_CONTROL, OUTPUT_CSC_GRPH_MODE, 0); + WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp); + + /* XXX match this to the depth of the crtc fmt block, move to modeset? */ + WREG32(mmDENORM_CONTROL + amdgpu_crtc->crtc_offset, 0); + /* XXX this only needs to be programmed once per crtc at startup, + * not sure where the best place for it is + */ + tmp = RREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset); + tmp = REG_SET_FIELD(tmp, ALPHA_CONTROL, CURSOR_ALPHA_BLND_ENA, 1); + WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset, tmp); +} + +static int dce_v11_0_pick_dig_encoder(struct drm_encoder *encoder) +{ + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; + + switch (amdgpu_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: + if (dig->linkb) + return 1; + else + return 0; + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: + if (dig->linkb) + return 3; + else + return 2; + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + if (dig->linkb) + return 5; + else + return 4; + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: + return 6; + break; + default: + DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id); + return 0; + } +} + +/** + * dce_v11_0_pick_pll - Allocate a PPLL for use by the crtc. + * + * @crtc: drm crtc + * + * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors + * a single PPLL can be used for all DP crtcs/encoders. For non-DP + * monitors a dedicated PPLL must be used. If a particular board has + * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming + * as there is no need to program the PLL itself. 
If we are not able to + * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to + * avoid messing up an existing monitor. + * + * Asic specific PLL information + * + * DCE 10.x + * Tonga + * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) + * CI + * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC + * + */ +static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct amdgpu_device *adev = dev->dev_private; + u32 pll_in_use; + int pll; + + if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) { + if (adev->clock.dp_extclk) + /* skip PPLL programming if using ext clock */ + return ATOM_PPLL_INVALID; + else { + /* use the same PPLL for all DP monitors */ + pll = amdgpu_pll_get_shared_dp_ppll(crtc); + if (pll != ATOM_PPLL_INVALID) + return pll; + } + } else { + /* use the same PPLL for all monitors with the same clock */ + pll = amdgpu_pll_get_shared_nondp_ppll(crtc); + if (pll != ATOM_PPLL_INVALID) + return pll; + } + + /* XXX need to determine what plls are available on each DCE11 part */ + pll_in_use = amdgpu_pll_get_use_mask(crtc); + if (adev->asic_type == CHIP_CARRIZO) { + if (!(pll_in_use & (1 << ATOM_PPLL1))) + return ATOM_PPLL1; + if (!(pll_in_use & (1 << ATOM_PPLL0))) + return ATOM_PPLL0; + DRM_ERROR("unable to allocate a PPLL\n"); + return ATOM_PPLL_INVALID; + } else { + if (!(pll_in_use & (1 << ATOM_PPLL2))) + return ATOM_PPLL2; + if (!(pll_in_use & (1 << ATOM_PPLL1))) + return ATOM_PPLL1; + if (!(pll_in_use & (1 << ATOM_PPLL0))) + return ATOM_PPLL0; + DRM_ERROR("unable to allocate a PPLL\n"); + return ATOM_PPLL_INVALID; + } + return ATOM_PPLL_INVALID; +} + +static void dce_v11_0_lock_cursor(struct drm_crtc *crtc, bool lock) +{ + struct amdgpu_device *adev = crtc->dev->dev_private; + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + uint32_t cur_lock; + + cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset); + if (lock) + cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 1); + else + cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 0); + WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock); +} + +static void dce_v11_0_hide_cursor(struct drm_crtc *crtc) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct amdgpu_device *adev = crtc->dev->dev_private; + u32 tmp; + + tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset); + tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0); + WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp); +} + +static void dce_v11_0_show_cursor(struct drm_crtc *crtc) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct amdgpu_device *adev = crtc->dev->dev_private; + u32 tmp; + + tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset); + tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1); + tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2); + WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp); +} + +static void dce_v11_0_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj, + uint64_t gpu_addr) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct amdgpu_device *adev = crtc->dev->dev_private; + + WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, + upper_32_bits(gpu_addr)); + WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, + lower_32_bits(gpu_addr)); +} + +static int dce_v11_0_crtc_cursor_move(struct drm_crtc *crtc, 
+ int x, int y) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct amdgpu_device *adev = crtc->dev->dev_private; + int xorigin = 0, yorigin = 0; + + /* avivo cursor are offset into the total surface */ + x += crtc->x; + y += crtc->y; + DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); + + if (x < 0) { + xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1); + x = 0; + } + if (y < 0) { + yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1); + y = 0; + } + + dce_v11_0_lock_cursor(crtc, true); + WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); + WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); + WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, + ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1)); + dce_v11_0_lock_cursor(crtc, false); + + return 0; +} + +static int dce_v11_0_crtc_cursor_set(struct drm_crtc *crtc, + struct drm_file *file_priv, + uint32_t handle, + uint32_t width, + uint32_t height) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct drm_gem_object *obj; + struct amdgpu_bo *robj; + uint64_t gpu_addr; + int ret; + + if (!handle) { + /* turn off cursor */ + dce_v11_0_hide_cursor(crtc); + obj = NULL; + goto unpin; + } + + if ((width > amdgpu_crtc->max_cursor_width) || + (height > amdgpu_crtc->max_cursor_height)) { + DRM_ERROR("bad cursor width or height %d x %d\n", width, height); + return -EINVAL; + } + + obj = drm_gem_object_lookup(crtc->dev, file_priv, handle); + if (!obj) { + DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id); + return -ENOENT; + } + + robj = gem_to_amdgpu_bo(obj); + ret = amdgpu_bo_reserve(robj, false); + if (unlikely(ret != 0)) + goto fail; + ret = amdgpu_bo_pin_restricted(robj, AMDGPU_GEM_DOMAIN_VRAM, + 0, &gpu_addr); + amdgpu_bo_unreserve(robj); + if (ret) + goto fail; + + amdgpu_crtc->cursor_width = width; + amdgpu_crtc->cursor_height = height; + + dce_v11_0_lock_cursor(crtc, true); + dce_v11_0_set_cursor(crtc, obj, gpu_addr); + dce_v11_0_show_cursor(crtc); + dce_v11_0_lock_cursor(crtc, false); + +unpin: + if (amdgpu_crtc->cursor_bo) { + robj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); + ret = amdgpu_bo_reserve(robj, false); + if (likely(ret == 0)) { + amdgpu_bo_unpin(robj); + amdgpu_bo_unreserve(robj); + } + drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo); + } + + amdgpu_crtc->cursor_bo = obj; + return 0; +fail: + drm_gem_object_unreference_unlocked(obj); + + return ret; +} + +static void dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, uint32_t start, uint32_t size) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + int end = (start + size > 256) ? 
256 : start + size, i; + + /* userspace palettes are always correct as is */ + for (i = start; i < end; i++) { + amdgpu_crtc->lut_r[i] = red[i] >> 6; + amdgpu_crtc->lut_g[i] = green[i] >> 6; + amdgpu_crtc->lut_b[i] = blue[i] >> 6; + } + dce_v11_0_crtc_load_lut(crtc); +} + +static void dce_v11_0_crtc_destroy(struct drm_crtc *crtc) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + + drm_crtc_cleanup(crtc); + destroy_workqueue(amdgpu_crtc->pflip_queue); + kfree(amdgpu_crtc); +} + +static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = { + .cursor_set = dce_v11_0_crtc_cursor_set, + .cursor_move = dce_v11_0_crtc_cursor_move, + .gamma_set = dce_v11_0_crtc_gamma_set, + .set_config = amdgpu_crtc_set_config, + .destroy = dce_v11_0_crtc_destroy, + .page_flip = amdgpu_crtc_page_flip, +}; + +static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode) +{ + struct drm_device *dev = crtc->dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + + switch (mode) { + case DRM_MODE_DPMS_ON: + amdgpu_crtc->enabled = true; + amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE); + dce_v11_0_vga_enable(crtc, true); + amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); + dce_v11_0_vga_enable(crtc, false); + drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); + dce_v11_0_crtc_load_lut(crtc); + break; + case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_SUSPEND: + case DRM_MODE_DPMS_OFF: + drm_vblank_pre_modeset(dev, amdgpu_crtc->crtc_id); + if (amdgpu_crtc->enabled) { + dce_v11_0_vga_enable(crtc, true); + amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE); + dce_v11_0_vga_enable(crtc, false); + } + amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE); + amdgpu_crtc->enabled = false; + break; + } + /* adjust pm to dpms */ + amdgpu_pm_compute_clocks(adev); +} + +static void dce_v11_0_crtc_prepare(struct drm_crtc *crtc) +{ + /* disable crtc pair power gating before programming */ + amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE); + amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE); + dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); +} + +static void dce_v11_0_crtc_commit(struct drm_crtc *crtc) +{ + dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON); + amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE); +} + +static void dce_v11_0_crtc_disable(struct drm_crtc *crtc) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_atom_ss ss; + int i; + + dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); + if (crtc->primary->fb) { + int r; + struct amdgpu_framebuffer *amdgpu_fb; + struct amdgpu_bo *rbo; + + amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); + rbo = gem_to_amdgpu_bo(amdgpu_fb->obj); + r = amdgpu_bo_reserve(rbo, false); + if (unlikely(r)) + DRM_ERROR("failed to reserve rbo before unpin\n"); + else { + amdgpu_bo_unpin(rbo); + amdgpu_bo_unreserve(rbo); + } + } + /* disable the GRPH */ + dce_v11_0_grph_enable(crtc, false); + + amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE); + + for (i = 0; i < adev->mode_info.num_crtc; i++) { + if (adev->mode_info.crtcs[i] && + adev->mode_info.crtcs[i]->enabled && + i != amdgpu_crtc->crtc_id && + amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) { + /* one other crtc is using this pll don't turn + * off the pll + */ + goto done; + } + } + + switch (amdgpu_crtc->pll_id) { + case ATOM_PPLL0: + case ATOM_PPLL1: + case ATOM_PPLL2: + /* disable the ppll */ + amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, 
amdgpu_crtc->pll_id, + 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss); + break; + default: + break; + } +done: + amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; + amdgpu_crtc->adjusted_clock = 0; + amdgpu_crtc->encoder = NULL; + amdgpu_crtc->connector = NULL; +} + +static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode, + int x, int y, struct drm_framebuffer *old_fb) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + + if (!amdgpu_crtc->adjusted_clock) + return -EINVAL; + + amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode); + amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode); + dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0); + amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode); + amdgpu_atombios_crtc_scaler_setup(crtc); + /* update the stored hw mode for dpm */ + amdgpu_crtc->hw_mode = *adjusted_mode; + + return 0; +} + +static bool dce_v11_0_crtc_mode_fixup(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct drm_encoder *encoder; + + /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */ + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + if (encoder->crtc == crtc) { + amdgpu_crtc->encoder = encoder; + amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder); + break; + } + } + if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) { + amdgpu_crtc->encoder = NULL; + amdgpu_crtc->connector = NULL; + return false; + } + if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) + return false; + if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode)) + return false; + /* pick pll */ + amdgpu_crtc->pll_id = dce_v11_0_pick_pll(crtc); + /* if we can't get a PPLL for a non-DP encoder, fail */ + if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) && + !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) + return false; + + return true; +} + +static int dce_v11_0_crtc_set_base(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *old_fb) +{ + return dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0); +} + +static int dce_v11_0_crtc_set_base_atomic(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int x, int y, enum mode_set_atomic state) +{ + return dce_v11_0_crtc_do_set_base(crtc, fb, x, y, 1); +} + +static const struct drm_crtc_helper_funcs dce_v11_0_crtc_helper_funcs = { + .dpms = dce_v11_0_crtc_dpms, + .mode_fixup = dce_v11_0_crtc_mode_fixup, + .mode_set = dce_v11_0_crtc_mode_set, + .mode_set_base = dce_v11_0_crtc_set_base, + .mode_set_base_atomic = dce_v11_0_crtc_set_base_atomic, + .prepare = dce_v11_0_crtc_prepare, + .commit = dce_v11_0_crtc_commit, + .load_lut = dce_v11_0_crtc_load_lut, + .disable = dce_v11_0_crtc_disable, +}; + +static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index) +{ + struct amdgpu_crtc *amdgpu_crtc; + int i; + + amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) + + (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); + if (amdgpu_crtc == NULL) + return -ENOMEM; + + drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v11_0_crtc_funcs); + + drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256); + amdgpu_crtc->crtc_id = index; + amdgpu_crtc->pflip_queue = create_singlethread_workqueue("amdgpu-pageflip-queue"); + adev->mode_info.crtcs[index] = amdgpu_crtc; + + 
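+ /* use a 128x128 maximum hardware cursor and advertise the same limit to userspace via mode_config below */ 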
amdgpu_crtc->max_cursor_width = 128; + amdgpu_crtc->max_cursor_height = 128; + adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width; + adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height; + + for (i = 0; i < 256; i++) { + amdgpu_crtc->lut_r[i] = i << 2; + amdgpu_crtc->lut_g[i] = i << 2; + amdgpu_crtc->lut_b[i] = i << 2; + } + + switch (amdgpu_crtc->crtc_id) { + case 0: + default: + amdgpu_crtc->crtc_offset = CRTC0_REGISTER_OFFSET; + break; + case 1: + amdgpu_crtc->crtc_offset = CRTC1_REGISTER_OFFSET; + break; + case 2: + amdgpu_crtc->crtc_offset = CRTC2_REGISTER_OFFSET; + break; + case 3: + amdgpu_crtc->crtc_offset = CRTC3_REGISTER_OFFSET; + break; + case 4: + amdgpu_crtc->crtc_offset = CRTC4_REGISTER_OFFSET; + break; + case 5: + amdgpu_crtc->crtc_offset = CRTC5_REGISTER_OFFSET; + break; + } + + amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; + amdgpu_crtc->adjusted_clock = 0; + amdgpu_crtc->encoder = NULL; + amdgpu_crtc->connector = NULL; + drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v11_0_crtc_helper_funcs); + + return 0; +} + +static int dce_v11_0_early_init(struct amdgpu_device *adev) +{ + adev->audio_endpt_rreg = &dce_v11_0_audio_endpt_rreg; + adev->audio_endpt_wreg = &dce_v11_0_audio_endpt_wreg; + + dce_v11_0_set_display_funcs(adev); + dce_v11_0_set_irq_funcs(adev); + + switch (adev->asic_type) { + case CHIP_CARRIZO: + adev->mode_info.num_crtc = 4; + adev->mode_info.num_hpd = 6; + adev->mode_info.num_dig = 9; + break; + default: + /* FIXME: not supported yet */ + return -EINVAL; + } + + return 0; +} + +static int dce_v11_0_sw_init(struct amdgpu_device *adev) +{ + int r, i; + + for (i = 0; i < adev->mode_info.num_crtc; i++) { + r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq); + if (r) + return r; + } + + for (i = 8; i < 20; i += 2) { + r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq); + if (r) + return r; + } + + /* HPD hotplug */ + r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq); + if (r) + return r; + + adev->mode_info.mode_config_initialized = true; + + adev->ddev->mode_config.funcs = &amdgpu_mode_funcs; + + adev->ddev->mode_config.max_width = 16384; + adev->ddev->mode_config.max_height = 16384; + + adev->ddev->mode_config.preferred_depth = 24; + adev->ddev->mode_config.prefer_shadow = 1; + + adev->ddev->mode_config.fb_base = adev->mc.aper_base; + + r = amdgpu_modeset_create_props(adev); + if (r) + return r; + + adev->ddev->mode_config.max_width = 16384; + adev->ddev->mode_config.max_height = 16384; + + /* allocate crtcs */ + for (i = 0; i < adev->mode_info.num_crtc; i++) { + r = dce_v11_0_crtc_init(adev, i); + if (r) + return r; + } + + if (amdgpu_atombios_get_connector_info_from_object_table(adev)) + amdgpu_print_display_setup(adev->ddev); + else + return -EINVAL; + + /* setup afmt */ + dce_v11_0_afmt_init(adev); + + r = dce_v11_0_audio_init(adev); + if (r) + return r; + + drm_kms_helper_poll_init(adev->ddev); + + return r; +} + +static int dce_v11_0_sw_fini(struct amdgpu_device *adev) +{ + kfree(adev->mode_info.bios_hardcoded_edid); + + drm_kms_helper_poll_fini(adev->ddev); + + dce_v11_0_audio_fini(adev); + + dce_v11_0_afmt_fini(adev); + + adev->mode_info.mode_config_initialized = false; + + return 0; +} + +static int dce_v11_0_hw_init(struct amdgpu_device *adev) +{ + int i; + + dce_v11_0_init_golden_registers(adev); + + /* init dig PHYs, disp eng pll */ + amdgpu_atombios_encoder_init_dig(adev); + amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk); + + /* initialize hpd */ + dce_v11_0_hpd_init(adev); + + for (i = 0; i < 
adev->mode_info.audio.num_pins; i++) { + dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); + } + + return 0; +} + +static int dce_v11_0_hw_fini(struct amdgpu_device *adev) +{ + int i; + + dce_v11_0_hpd_fini(adev); + + for (i = 0; i < adev->mode_info.audio.num_pins; i++) { + dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); + } + + return 0; +} + +static int dce_v11_0_suspend(struct amdgpu_device *adev) +{ + struct drm_connector *connector; + + drm_kms_helper_poll_disable(adev->ddev); + + /* turn off display hw */ + list_for_each_entry(connector, &adev->ddev->mode_config.connector_list, head) { + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); + } + + amdgpu_atombios_scratch_regs_save(adev); + + dce_v11_0_hpd_fini(adev); + + return 0; +} + +static int dce_v11_0_resume(struct amdgpu_device *adev) +{ + struct drm_connector *connector; + + dce_v11_0_init_golden_registers(adev); + + amdgpu_atombios_scratch_regs_restore(adev); + + /* init dig PHYs, disp eng pll */ + amdgpu_atombios_crtc_powergate_init(adev); + amdgpu_atombios_encoder_init_dig(adev); + amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk); + /* turn on the BL */ + if (adev->mode_info.bl_encoder) { + u8 bl_level = amdgpu_display_backlight_get_level(adev, + adev->mode_info.bl_encoder); + amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder, + bl_level); + } + + /* initialize hpd */ + dce_v11_0_hpd_init(adev); + + /* blat the mode back in */ + drm_helper_resume_force_mode(adev->ddev); + /* turn on display hw */ + list_for_each_entry(connector, &adev->ddev->mode_config.connector_list, head) { + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); + } + + drm_kms_helper_poll_enable(adev->ddev); + + return 0; +} + +static bool dce_v11_0_is_idle(struct amdgpu_device *adev) +{ + /* XXX todo */ + return true; +} + +static int dce_v11_0_wait_for_idle(struct amdgpu_device *adev) +{ + /* XXX todo */ + return 0; +} + +static void dce_v11_0_print_status(struct amdgpu_device *adev) +{ + dev_info(adev->dev, "DCE 10.x registers\n"); + /* XXX todo */ +} + +static int dce_v11_0_soft_reset(struct amdgpu_device *adev) +{ + u32 srbm_soft_reset = 0, tmp; + + if (dce_v11_0_is_display_hung(adev)) + srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK; + + if (srbm_soft_reset) { + dce_v11_0_print_status(adev); + + tmp = RREG32(mmSRBM_SOFT_RESET); + tmp |= srbm_soft_reset; + dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + udelay(50); + + tmp &= ~srbm_soft_reset; + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + /* Wait a little for things to settle down */ + udelay(50); + dce_v11_0_print_status(adev); + } + return 0; +} + +static void dce_v11_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev, + int crtc, + enum amdgpu_interrupt_state state) +{ + u32 lb_interrupt_mask; + + if (crtc >= adev->mode_info.num_crtc) { + DRM_DEBUG("invalid crtc %d\n", crtc); + return; + } + + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]); + lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK, + VBLANK_INTERRUPT_MASK, 0); + WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask); + break; + case AMDGPU_IRQ_STATE_ENABLE: + lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]); + lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK, + 
VBLANK_INTERRUPT_MASK, 1); + WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask); + break; + default: + break; + } +} + +static void dce_v11_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev, + int crtc, + enum amdgpu_interrupt_state state) +{ + u32 lb_interrupt_mask; + + if (crtc >= adev->mode_info.num_crtc) { + DRM_DEBUG("invalid crtc %d\n", crtc); + return; + } + + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]); + lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK, + VLINE_INTERRUPT_MASK, 0); + WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask); + break; + case AMDGPU_IRQ_STATE_ENABLE: + lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]); + lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK, + VLINE_INTERRUPT_MASK, 1); + WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask); + break; + default: + break; + } +} + +static int dce_v11_0_set_hpd_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned hpd, + enum amdgpu_interrupt_state state) +{ + u32 tmp; + + if (hpd >= adev->mode_info.num_hpd) { + DRM_DEBUG("invalid hdp %d\n", hpd); + return 0; + } + + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]); + tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0); + WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp); + break; + case AMDGPU_IRQ_STATE_ENABLE: + tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]); + tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 1); + WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp); + break; + default: + break; + } + + return 0; +} + +static int dce_v11_0_set_crtc_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned type, + enum amdgpu_interrupt_state state) +{ + switch (type) { + case AMDGPU_CRTC_IRQ_VBLANK1: + dce_v11_0_set_crtc_vblank_interrupt_state(adev, 0, state); + break; + case AMDGPU_CRTC_IRQ_VBLANK2: + dce_v11_0_set_crtc_vblank_interrupt_state(adev, 1, state); + break; + case AMDGPU_CRTC_IRQ_VBLANK3: + dce_v11_0_set_crtc_vblank_interrupt_state(adev, 2, state); + break; + case AMDGPU_CRTC_IRQ_VBLANK4: + dce_v11_0_set_crtc_vblank_interrupt_state(adev, 3, state); + break; + case AMDGPU_CRTC_IRQ_VBLANK5: + dce_v11_0_set_crtc_vblank_interrupt_state(adev, 4, state); + break; + case AMDGPU_CRTC_IRQ_VBLANK6: + dce_v11_0_set_crtc_vblank_interrupt_state(adev, 5, state); + break; + case AMDGPU_CRTC_IRQ_VLINE1: + dce_v11_0_set_crtc_vline_interrupt_state(adev, 0, state); + break; + case AMDGPU_CRTC_IRQ_VLINE2: + dce_v11_0_set_crtc_vline_interrupt_state(adev, 1, state); + break; + case AMDGPU_CRTC_IRQ_VLINE3: + dce_v11_0_set_crtc_vline_interrupt_state(adev, 2, state); + break; + case AMDGPU_CRTC_IRQ_VLINE4: + dce_v11_0_set_crtc_vline_interrupt_state(adev, 3, state); + break; + case AMDGPU_CRTC_IRQ_VLINE5: + dce_v11_0_set_crtc_vline_interrupt_state(adev, 4, state); + break; + case AMDGPU_CRTC_IRQ_VLINE6: + dce_v11_0_set_crtc_vline_interrupt_state(adev, 5, state); + break; + default: + break; + } + return 0; +} + +static int dce_v11_0_set_pageflip_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned type, + enum amdgpu_interrupt_state state) +{ + u32 reg, reg_block; + /* now deal with page flip IRQ */ + switch (type) { + case AMDGPU_PAGEFLIP_IRQ_D1: + reg_block = CRTC0_REGISTER_OFFSET; + break; + case 
AMDGPU_PAGEFLIP_IRQ_D2: + reg_block = CRTC1_REGISTER_OFFSET; + break; + case AMDGPU_PAGEFLIP_IRQ_D3: + reg_block = CRTC2_REGISTER_OFFSET; + break; + case AMDGPU_PAGEFLIP_IRQ_D4: + reg_block = CRTC3_REGISTER_OFFSET; + break; + case AMDGPU_PAGEFLIP_IRQ_D5: + reg_block = CRTC4_REGISTER_OFFSET; + break; + case AMDGPU_PAGEFLIP_IRQ_D6: + reg_block = CRTC5_REGISTER_OFFSET; + break; + default: + DRM_ERROR("invalid pageflip crtc %d\n", type); + return -EINVAL; + } + + reg = RREG32(mmGRPH_INTERRUPT_CONTROL + reg_block); + if (state == AMDGPU_IRQ_STATE_DISABLE) + WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); + else + WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); + + return 0; +} + +static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + int reg_block; + unsigned long flags; + unsigned crtc_id; + struct amdgpu_crtc *amdgpu_crtc; + struct amdgpu_flip_work *works; + + crtc_id = (entry->src_id - 8) >> 1; + amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; + + /* ack the interrupt */ + switch(crtc_id){ + case AMDGPU_PAGEFLIP_IRQ_D1: + reg_block = CRTC0_REGISTER_OFFSET; + break; + case AMDGPU_PAGEFLIP_IRQ_D2: + reg_block = CRTC1_REGISTER_OFFSET; + break; + case AMDGPU_PAGEFLIP_IRQ_D3: + reg_block = CRTC2_REGISTER_OFFSET; + break; + case AMDGPU_PAGEFLIP_IRQ_D4: + reg_block = CRTC3_REGISTER_OFFSET; + break; + case AMDGPU_PAGEFLIP_IRQ_D5: + reg_block = CRTC4_REGISTER_OFFSET; + break; + case AMDGPU_PAGEFLIP_IRQ_D6: + reg_block = CRTC5_REGISTER_OFFSET; + break; + default: + DRM_ERROR("invalid pageflip crtc %d\n", crtc_id); + return -EINVAL; + } + + if (RREG32(mmGRPH_INTERRUPT_STATUS + reg_block) & GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK) + WREG32(mmGRPH_INTERRUPT_STATUS + reg_block, GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK); + + /* IRQ could occur when in initial stage */ + if(amdgpu_crtc == NULL) + return 0; + + spin_lock_irqsave(&adev->ddev->event_lock, flags); + works = amdgpu_crtc->pflip_works; + if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){ + DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != " + "AMDGPU_FLIP_SUBMITTED(%d)\n", + amdgpu_crtc->pflip_status, + AMDGPU_FLIP_SUBMITTED); + spin_unlock_irqrestore(&adev->ddev->event_lock, flags); + return 0; + } + + /* page flip completed. 
clean up */
+	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
+	amdgpu_crtc->pflip_works = NULL;
+
+	/* wakeup userspace */
+	if (works->event)
+		drm_send_vblank_event(adev->ddev, crtc_id, works->event);
+
+	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+
+	drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
+	amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id);
+	queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work);
+
+	return 0;
+}
+
+static void dce_v11_0_hpd_int_ack(struct amdgpu_device *adev,
+				  int hpd)
+{
+	u32 tmp;
+
+	if (hpd >= adev->mode_info.num_hpd) {
+		DRM_DEBUG("invalid hpd %d\n", hpd);
+		return;
+	}
+
+	tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
+	tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_ACK, 1);
+	WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
+}
+
+static void dce_v11_0_crtc_vblank_int_ack(struct amdgpu_device *adev,
+					  int crtc)
+{
+	u32 tmp;
+
+	if (crtc >= adev->mode_info.num_crtc) {
+		DRM_DEBUG("invalid crtc %d\n", crtc);
+		return;
+	}
+
+	tmp = RREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc]);
+	tmp = REG_SET_FIELD(tmp, LB_VBLANK_STATUS, VBLANK_ACK, 1);
+	WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], tmp);
+}
+
+static void dce_v11_0_crtc_vline_int_ack(struct amdgpu_device *adev,
+					 int crtc)
+{
+	u32 tmp;
+
+	if (crtc >= adev->mode_info.num_crtc) {
+		DRM_DEBUG("invalid crtc %d\n", crtc);
+		return;
+	}
+
+	tmp = RREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc]);
+	tmp = REG_SET_FIELD(tmp, LB_VLINE_STATUS, VLINE_ACK, 1);
+	WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], tmp);
+}
+
+static int dce_v11_0_crtc_irq(struct amdgpu_device *adev,
+			      struct amdgpu_irq_src *source,
+			      struct amdgpu_iv_entry *entry)
+{
+	unsigned crtc = entry->src_id - 1;
+	uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
+	unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);
+
+	switch (entry->src_data) {
+	case 0: /* vblank */
+		if (disp_int & interrupt_status_offsets[crtc].vblank) {
+			dce_v11_0_crtc_vblank_int_ack(adev, crtc);
+			if (amdgpu_irq_enabled(adev, source, irq_type)) {
+				drm_handle_vblank(adev->ddev, crtc);
+			}
+			DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+		}
+		break;
+	case 1: /* vline */
+		if (disp_int & interrupt_status_offsets[crtc].vline) {
+			dce_v11_0_crtc_vline_int_ack(adev, crtc);
+			DRM_DEBUG("IH: D%d vline\n", crtc + 1);
+		}
+		break;
+	default:
+		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
+		break;
+	}
+
+	return 0;
+}
+
+static int dce_v11_0_hpd_irq(struct amdgpu_device *adev,
+			     struct amdgpu_irq_src *source,
+			     struct amdgpu_iv_entry *entry)
+{
+	uint32_t disp_int, mask;
+	unsigned hpd;
+
+	if (entry->src_data >= adev->mode_info.num_hpd) {
+		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
+		return 0;
+	}
+
+	hpd = entry->src_data;
+	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
+	mask = interrupt_status_offsets[hpd].hpd;
+
+	if (disp_int & mask) {
+		dce_v11_0_hpd_int_ack(adev, hpd);
+		schedule_work(&adev->hotplug_work);
+		DRM_DEBUG("IH: HPD%d\n", hpd + 1);
+	}
+
+	return 0;
+}
+
+static int dce_v11_0_set_clockgating_state(struct amdgpu_device *adev,
+					   enum amdgpu_clockgating_state state)
+{
+	return 0;
+}
+
+static int dce_v11_0_set_powergating_state(struct amdgpu_device *adev,
+					   enum amdgpu_powergating_state state)
+{
+	return 0;
+}
+
+const struct amdgpu_ip_funcs dce_v11_0_ip_funcs = {
+	.early_init = dce_v11_0_early_init,
+	.late_init = NULL,
+	.sw_init = dce_v11_0_sw_init,
+	.sw_fini = dce_v11_0_sw_fini,
+	.hw_init = dce_v11_0_hw_init,
+	.hw_fini =
dce_v11_0_hw_fini, + .suspend = dce_v11_0_suspend, + .resume = dce_v11_0_resume, + .is_idle = dce_v11_0_is_idle, + .wait_for_idle = dce_v11_0_wait_for_idle, + .soft_reset = dce_v11_0_soft_reset, + .print_status = dce_v11_0_print_status, + .set_clockgating_state = dce_v11_0_set_clockgating_state, + .set_powergating_state = dce_v11_0_set_powergating_state, +}; + +static void +dce_v11_0_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + + amdgpu_encoder->pixel_clock = adjusted_mode->clock; + + /* need to call this here rather than in prepare() since we need some crtc info */ + amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); + + /* set scaler clears this on some chips */ + dce_v11_0_set_interleave(encoder->crtc, mode); + + if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) { + dce_v11_0_afmt_enable(encoder, true); + dce_v11_0_afmt_setmode(encoder, adjusted_mode); + } +} + +static void dce_v11_0_encoder_prepare(struct drm_encoder *encoder) +{ + struct amdgpu_device *adev = encoder->dev->dev_private; + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); + + if ((amdgpu_encoder->active_device & + (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) || + (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) != + ENCODER_OBJECT_ID_NONE)) { + struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; + if (dig) { + dig->dig_encoder = dce_v11_0_pick_dig_encoder(encoder); + if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT) + dig->afmt = adev->mode_info.afmt[dig->dig_encoder]; + } + } + + amdgpu_atombios_scratch_regs_lock(adev, true); + + if (connector) { + struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); + + /* select the clock/data port if it uses a router */ + if (amdgpu_connector->router.cd_valid) + amdgpu_i2c_router_select_cd_port(amdgpu_connector); + + /* turn eDP panel on for mode set */ + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) + amdgpu_atombios_encoder_set_edp_panel_power(connector, + ATOM_TRANSMITTER_ACTION_POWER_ON); + } + + /* this is needed for the pll/ss setup to work correctly in some cases */ + amdgpu_atombios_encoder_set_crtc_source(encoder); + /* set up the FMT blocks */ + dce_v11_0_program_fmt(encoder); +} + +static void dce_v11_0_encoder_commit(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; + + /* need to call this here as we need the crtc set up */ + amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON); + amdgpu_atombios_scratch_regs_lock(adev, false); +} + +static void dce_v11_0_encoder_disable(struct drm_encoder *encoder) +{ + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct amdgpu_encoder_atom_dig *dig; + + amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); + + if (amdgpu_atombios_encoder_is_digital(encoder)) { + if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) + dce_v11_0_afmt_enable(encoder, false); + dig = amdgpu_encoder->enc_priv; + dig->dig_encoder = -1; + } + amdgpu_encoder->active_device = 0; +} + +/* these are handled by the primary encoders */ +static void dce_v11_0_ext_prepare(struct drm_encoder *encoder) +{ + +} + +static void dce_v11_0_ext_commit(struct drm_encoder *encoder) +{ + +} + +static 
void +dce_v11_0_ext_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + +} + +static void dce_v11_0_ext_disable(struct drm_encoder *encoder) +{ + +} + +static void +dce_v11_0_ext_dpms(struct drm_encoder *encoder, int mode) +{ + +} + +static bool dce_v11_0_ext_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +static const struct drm_encoder_helper_funcs dce_v11_0_ext_helper_funcs = { + .dpms = dce_v11_0_ext_dpms, + .mode_fixup = dce_v11_0_ext_mode_fixup, + .prepare = dce_v11_0_ext_prepare, + .mode_set = dce_v11_0_ext_mode_set, + .commit = dce_v11_0_ext_commit, + .disable = dce_v11_0_ext_disable, + /* no detect for TMDS/LVDS yet */ +}; + +static const struct drm_encoder_helper_funcs dce_v11_0_dig_helper_funcs = { + .dpms = amdgpu_atombios_encoder_dpms, + .mode_fixup = amdgpu_atombios_encoder_mode_fixup, + .prepare = dce_v11_0_encoder_prepare, + .mode_set = dce_v11_0_encoder_mode_set, + .commit = dce_v11_0_encoder_commit, + .disable = dce_v11_0_encoder_disable, + .detect = amdgpu_atombios_encoder_dig_detect, +}; + +static const struct drm_encoder_helper_funcs dce_v11_0_dac_helper_funcs = { + .dpms = amdgpu_atombios_encoder_dpms, + .mode_fixup = amdgpu_atombios_encoder_mode_fixup, + .prepare = dce_v11_0_encoder_prepare, + .mode_set = dce_v11_0_encoder_mode_set, + .commit = dce_v11_0_encoder_commit, + .detect = amdgpu_atombios_encoder_dac_detect, +}; + +static void dce_v11_0_encoder_destroy(struct drm_encoder *encoder) +{ + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) + amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder); + kfree(amdgpu_encoder->enc_priv); + drm_encoder_cleanup(encoder); + kfree(amdgpu_encoder); +} + +static const struct drm_encoder_funcs dce_v11_0_encoder_funcs = { + .destroy = dce_v11_0_encoder_destroy, +}; + +static void dce_v11_0_encoder_add(struct amdgpu_device *adev, + uint32_t encoder_enum, + uint32_t supported_device, + u16 caps) +{ + struct drm_device *dev = adev->ddev; + struct drm_encoder *encoder; + struct amdgpu_encoder *amdgpu_encoder; + + /* see if we already added it */ + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + amdgpu_encoder = to_amdgpu_encoder(encoder); + if (amdgpu_encoder->encoder_enum == encoder_enum) { + amdgpu_encoder->devices |= supported_device; + return; + } + + } + + /* add a new one */ + amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL); + if (!amdgpu_encoder) + return; + + encoder = &amdgpu_encoder->base; + switch (adev->mode_info.num_crtc) { + case 1: + encoder->possible_crtcs = 0x1; + break; + case 2: + default: + encoder->possible_crtcs = 0x3; + break; + case 4: + encoder->possible_crtcs = 0xf; + break; + case 6: + encoder->possible_crtcs = 0x3f; + break; + } + + amdgpu_encoder->enc_priv = NULL; + + amdgpu_encoder->encoder_enum = encoder_enum; + amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; + amdgpu_encoder->devices = supported_device; + amdgpu_encoder->rmx_type = RMX_OFF; + amdgpu_encoder->underscan_type = UNDERSCAN_OFF; + amdgpu_encoder->is_ext_encoder = false; + amdgpu_encoder->caps = caps; + + switch (amdgpu_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: + drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, + DRM_MODE_ENCODER_DAC); + 
drm_encoder_helper_add(encoder, &dce_v11_0_dac_helper_funcs); + break; + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: + if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { + amdgpu_encoder->rmx_type = RMX_FULL; + drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, + DRM_MODE_ENCODER_LVDS); + amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder); + } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) { + drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, + DRM_MODE_ENCODER_DAC); + amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); + } else { + drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, + DRM_MODE_ENCODER_TMDS); + amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); + } + drm_encoder_helper_add(encoder, &dce_v11_0_dig_helper_funcs); + break; + case ENCODER_OBJECT_ID_SI170B: + case ENCODER_OBJECT_ID_CH7303: + case ENCODER_OBJECT_ID_EXTERNAL_SDVOA: + case ENCODER_OBJECT_ID_EXTERNAL_SDVOB: + case ENCODER_OBJECT_ID_TITFP513: + case ENCODER_OBJECT_ID_VT1623: + case ENCODER_OBJECT_ID_HDMI_SI1930: + case ENCODER_OBJECT_ID_TRAVIS: + case ENCODER_OBJECT_ID_NUTMEG: + /* these are handled by the primary encoders */ + amdgpu_encoder->is_ext_encoder = true; + if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) + drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, + DRM_MODE_ENCODER_LVDS); + else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) + drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, + DRM_MODE_ENCODER_DAC); + else + drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, + DRM_MODE_ENCODER_TMDS); + drm_encoder_helper_add(encoder, &dce_v11_0_ext_helper_funcs); + break; + } +} + +static const struct amdgpu_display_funcs dce_v11_0_display_funcs = { + .set_vga_render_state = &dce_v11_0_set_vga_render_state, + .bandwidth_update = &dce_v11_0_bandwidth_update, + .vblank_get_counter = &dce_v11_0_vblank_get_counter, + .vblank_wait = &dce_v11_0_vblank_wait, + .is_display_hung = &dce_v11_0_is_display_hung, + .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level, + .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level, + .hpd_sense = &dce_v11_0_hpd_sense, + .hpd_set_polarity = &dce_v11_0_hpd_set_polarity, + .hpd_get_gpio_reg = &dce_v11_0_hpd_get_gpio_reg, + .page_flip = &dce_v11_0_page_flip, + .page_flip_get_scanoutpos = &dce_v11_0_crtc_get_scanoutpos, + .add_encoder = &dce_v11_0_encoder_add, + .add_connector = &amdgpu_connector_add, + .stop_mc_access = &dce_v11_0_stop_mc_access, + .resume_mc_access = &dce_v11_0_resume_mc_access, +}; + +static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev) +{ + if (adev->mode_info.funcs == NULL) + adev->mode_info.funcs = &dce_v11_0_display_funcs; +} + +static const struct amdgpu_irq_src_funcs dce_v11_0_crtc_irq_funcs = { + .set = dce_v11_0_set_crtc_irq_state, + .process = dce_v11_0_crtc_irq, +}; + +static const struct amdgpu_irq_src_funcs dce_v11_0_pageflip_irq_funcs = { + .set = dce_v11_0_set_pageflip_irq_state, + .process = dce_v11_0_pageflip_irq, +}; + +static const struct amdgpu_irq_src_funcs dce_v11_0_hpd_irq_funcs = { + .set = dce_v11_0_set_hpd_irq_state, + .process = dce_v11_0_hpd_irq, +}; + +static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->crtc_irq.num_types = 
AMDGPU_CRTC_IRQ_LAST; + adev->crtc_irq.funcs = &dce_v11_0_crtc_irq_funcs; + + adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST; + adev->pageflip_irq.funcs = &dce_v11_0_pageflip_irq_funcs; + + adev->hpd_irq.num_types = AMDGPU_HPD_LAST; + adev->hpd_irq.funcs = &dce_v11_0_hpd_irq_funcs; +} diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h new file mode 100644 index 000000000000..eeb9a56b514a --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h @@ -0,0 +1,29 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __DCE_V11_0_H__ +#define __DCE_V11_0_H__ + +extern const struct amdgpu_ip_funcs dce_v11_0_ip_funcs; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c new file mode 100644 index 000000000000..a8397dd2bce4 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -0,0 +1,4286 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include +#include "drmP.h" +#include "amdgpu.h" +#include "amdgpu_gfx.h" +#include "vi.h" +#include "vid.h" +#include "amdgpu_ucode.h" +#include "clearstate_vi.h" + +#include "gmc/gmc_8_2_d.h" +#include "gmc/gmc_8_2_sh_mask.h" + +#include "oss/oss_3_0_d.h" +#include "oss/oss_3_0_sh_mask.h" + +#include "bif/bif_5_0_d.h" +#include "bif/bif_5_0_sh_mask.h" + +#include "gca/gfx_8_0_d.h" +#include "gca/gfx_8_0_enum.h" +#include "gca/gfx_8_0_sh_mask.h" +#include "gca/gfx_8_0_enum.h" + +#include "uvd/uvd_5_0_d.h" +#include "uvd/uvd_5_0_sh_mask.h" + +#include "dce/dce_10_0_d.h" +#include "dce/dce_10_0_sh_mask.h" + +#define GFX8_NUM_GFX_RINGS 1 +#define GFX8_NUM_COMPUTE_RINGS 8 + +#define TOPAZ_GB_ADDR_CONFIG_GOLDEN 0x22010001 +#define CARRIZO_GB_ADDR_CONFIG_GOLDEN 0x22010001 +#define TONGA_GB_ADDR_CONFIG_GOLDEN 0x22011003 + +#define ARRAY_MODE(x) ((x) << GB_TILE_MODE0__ARRAY_MODE__SHIFT) +#define PIPE_CONFIG(x) ((x) << GB_TILE_MODE0__PIPE_CONFIG__SHIFT) +#define TILE_SPLIT(x) ((x) << GB_TILE_MODE0__TILE_SPLIT__SHIFT) +#define MICRO_TILE_MODE_NEW(x) ((x) << GB_TILE_MODE0__MICRO_TILE_MODE_NEW__SHIFT) +#define SAMPLE_SPLIT(x) ((x) << GB_TILE_MODE0__SAMPLE_SPLIT__SHIFT) +#define BANK_WIDTH(x) ((x) << GB_MACROTILE_MODE0__BANK_WIDTH__SHIFT) +#define BANK_HEIGHT(x) ((x) << GB_MACROTILE_MODE0__BANK_HEIGHT__SHIFT) +#define MACRO_TILE_ASPECT(x) ((x) << GB_MACROTILE_MODE0__MACRO_TILE_ASPECT__SHIFT) +#define NUM_BANKS(x) ((x) << GB_MACROTILE_MODE0__NUM_BANKS__SHIFT) + +MODULE_FIRMWARE("radeon/carrizo_ce.bin"); +MODULE_FIRMWARE("radeon/carrizo_pfp.bin"); +MODULE_FIRMWARE("radeon/carrizo_me.bin"); +MODULE_FIRMWARE("radeon/carrizo_mec.bin"); +MODULE_FIRMWARE("radeon/carrizo_mec2.bin"); +MODULE_FIRMWARE("radeon/carrizo_rlc.bin"); + +MODULE_FIRMWARE("radeon/tonga_ce.bin"); +MODULE_FIRMWARE("radeon/tonga_pfp.bin"); +MODULE_FIRMWARE("radeon/tonga_me.bin"); +MODULE_FIRMWARE("radeon/tonga_mec.bin"); +MODULE_FIRMWARE("radeon/tonga_mec2.bin"); +MODULE_FIRMWARE("radeon/tonga_rlc.bin"); + +MODULE_FIRMWARE("radeon/topaz_ce.bin"); +MODULE_FIRMWARE("radeon/topaz_pfp.bin"); +MODULE_FIRMWARE("radeon/topaz_me.bin"); +MODULE_FIRMWARE("radeon/topaz_mec.bin"); +MODULE_FIRMWARE("radeon/topaz_mec2.bin"); +MODULE_FIRMWARE("radeon/topaz_rlc.bin"); + +static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] = +{ + {mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0}, + {mmGDS_VMID1_BASE, mmGDS_VMID1_SIZE, mmGDS_GWS_VMID1, mmGDS_OA_VMID1}, + {mmGDS_VMID2_BASE, mmGDS_VMID2_SIZE, mmGDS_GWS_VMID2, mmGDS_OA_VMID2}, + {mmGDS_VMID3_BASE, mmGDS_VMID3_SIZE, mmGDS_GWS_VMID3, mmGDS_OA_VMID3}, + {mmGDS_VMID4_BASE, mmGDS_VMID4_SIZE, mmGDS_GWS_VMID4, mmGDS_OA_VMID4}, + {mmGDS_VMID5_BASE, mmGDS_VMID5_SIZE, mmGDS_GWS_VMID5, mmGDS_OA_VMID5}, + {mmGDS_VMID6_BASE, mmGDS_VMID6_SIZE, mmGDS_GWS_VMID6, mmGDS_OA_VMID6}, + {mmGDS_VMID7_BASE, mmGDS_VMID7_SIZE, mmGDS_GWS_VMID7, mmGDS_OA_VMID7}, + {mmGDS_VMID8_BASE, mmGDS_VMID8_SIZE, mmGDS_GWS_VMID8, mmGDS_OA_VMID8}, + {mmGDS_VMID9_BASE, mmGDS_VMID9_SIZE, mmGDS_GWS_VMID9, mmGDS_OA_VMID9}, + {mmGDS_VMID10_BASE, mmGDS_VMID10_SIZE, mmGDS_GWS_VMID10, mmGDS_OA_VMID10}, + {mmGDS_VMID11_BASE, mmGDS_VMID11_SIZE, mmGDS_GWS_VMID11, mmGDS_OA_VMID11}, + {mmGDS_VMID12_BASE, mmGDS_VMID12_SIZE, mmGDS_GWS_VMID12, mmGDS_OA_VMID12}, + {mmGDS_VMID13_BASE, mmGDS_VMID13_SIZE, mmGDS_GWS_VMID13, mmGDS_OA_VMID13}, + {mmGDS_VMID14_BASE, mmGDS_VMID14_SIZE, mmGDS_GWS_VMID14, mmGDS_OA_VMID14}, + {mmGDS_VMID15_BASE, mmGDS_VMID15_SIZE, mmGDS_GWS_VMID15, mmGDS_OA_VMID15} +}; + +static const u32 
golden_settings_tonga_a11[] = +{ + mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208, + mmCB_HW_CONTROL_3, 0x00000040, 0x00000040, + mmDB_DEBUG2, 0xf00fffff, 0x00000400, + mmGB_GPU_ID, 0x0000000f, 0x00000000, + mmPA_SC_ENHANCE, 0xffffffff, 0x20000001, + mmPA_SC_FIFO_DEPTH_CNTL, 0x000003ff, 0x000000fc, + mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000, + mmTA_CNTL_AUX, 0x000f000f, 0x000b0000, + mmTCC_CTRL, 0x00100000, 0xf31fff7f, + mmTCP_ADDR_CONFIG, 0x000003ff, 0x000002fb, + mmTCP_CHAN_STEER_HI, 0xffffffff, 0x0000543b, + mmTCP_CHAN_STEER_LO, 0xffffffff, 0xa9210876, +}; + +static const u32 tonga_golden_common_all[] = +{ + mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, + mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x16000012, + mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002A, + mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003, + mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800, + mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800, + mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF, + mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF +}; + +static const u32 tonga_mgcg_cgcg_init[] = +{ + mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff, + mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, + mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100, + mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100, + mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100, + mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100, + mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100, + mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100, + mmTA_CGTT_CTRL, 0xffffffff, 0x00000100, + mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100, + mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100, + mmTD_CGTT_CTRL, 0xffffffff, 0x00000100, + mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, + mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000, + mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, + mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007, + mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005, + mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, + mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000, + mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, + mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007, + mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005, + mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, + mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000, + mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, + mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007, + mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005, + mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, + mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000, + mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, + mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007, + mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005, + mmCGTS_CU3_TD_TCP_CTRL_REG, 
0xffffffff, 0x00090008, + mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000, + mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, + mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007, + mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005, + mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, + mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000, + mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, + mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007, + mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005, + mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, + mmCGTS_CU6_SP0_CTRL_REG, 0xffffffff, 0x00010000, + mmCGTS_CU6_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, + mmCGTS_CU6_TA_CTRL_REG, 0xffffffff, 0x00040007, + mmCGTS_CU6_SP1_CTRL_REG, 0xffffffff, 0x00060005, + mmCGTS_CU6_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, + mmCGTS_CU7_SP0_CTRL_REG, 0xffffffff, 0x00010000, + mmCGTS_CU7_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, + mmCGTS_CU7_TA_CTRL_REG, 0xffffffff, 0x00040007, + mmCGTS_CU7_SP1_CTRL_REG, 0xffffffff, 0x00060005, + mmCGTS_CU7_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, + mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200, + mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100, + mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c, + mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001, +}; + +static const u32 golden_settings_iceland_a11[] = +{ + mmCB_HW_CONTROL_3, 0x00000040, 0x00000040, + mmDB_DEBUG2, 0xf00fffff, 0x00000400, + mmDB_DEBUG3, 0xc0000000, 0xc0000000, + mmGB_GPU_ID, 0x0000000f, 0x00000000, + mmPA_SC_ENHANCE, 0xffffffff, 0x20000001, + mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000, + mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x00000002, + mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000, + mmTA_CNTL_AUX, 0x000f000f, 0x000b0000, + mmTCC_CTRL, 0x00100000, 0xf31fff7f, + mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f1, + mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000, + mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00000010, +}; + +static const u32 iceland_golden_common_all[] = +{ + mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, + mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000002, + mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000, + mmGB_ADDR_CONFIG, 0xffffffff, 0x22010001, + mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800, + mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800, + mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF, + mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF +}; + +static const u32 iceland_mgcg_cgcg_init[] = +{ + mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff, + mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, + mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_CP_CLK_CTRL, 0xffffffff, 0xc0000100, + mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0xc0000100, + mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0xc0000100, + mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100, + mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100, + mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100, + mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0xff000100, + mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100, + 
mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100, + mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100, + mmTA_CGTT_CTRL, 0xffffffff, 0x00000100, + mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100, + mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100, + mmTD_CGTT_CTRL, 0xffffffff, 0x00000100, + mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, + mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000, + mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, + mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x0f840f87, + mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005, + mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, + mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000, + mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, + mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007, + mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005, + mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, + mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000, + mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, + mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007, + mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005, + mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, + mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000, + mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, + mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007, + mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005, + mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, + mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000, + mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, + mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x0f840f87, + mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005, + mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, + mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000, + mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, + mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007, + mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005, + mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, + mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200, + mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100, + mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c, +}; + +static const u32 cz_golden_settings_a11[] = +{ + mmCB_HW_CONTROL_3, 0x00000040, 0x00000040, + mmDB_DEBUG2, 0xf00fffff, 0x00000400, + mmGB_GPU_ID, 0x0000000f, 0x00000000, + mmPA_SC_ENHANCE, 0xffffffff, 0x00000001, + mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000, + mmTA_CNTL_AUX, 0x000f000f, 0x00010000, + mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f3, + mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00001302 +}; + +static const u32 cz_golden_common_all[] = +{ + mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, + mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000002, + mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000, + mmGB_ADDR_CONFIG, 0xffffffff, 0x22010001, + mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800, + mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800, + mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF, + mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF +}; + +static const u32 cz_mgcg_cgcg_init[] = +{ + mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff, + mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, + mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100, + mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100, + mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_RLC_CLK_CTRL, 0xffffffff, 
0x00000100, + mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100, + mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100, + mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100, + mmTA_CGTT_CTRL, 0xffffffff, 0x00000100, + mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100, + mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100, + mmTD_CGTT_CTRL, 0xffffffff, 0x00000100, + mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, + mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000, + mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, + mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007, + mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005, + mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, + mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000, + mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, + mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007, + mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005, + mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, + mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000, + mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, + mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007, + mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005, + mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, + mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000, + mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, + mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007, + mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005, + mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, + mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000, + mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, + mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007, + mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005, + mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, + mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000, + mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, + mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007, + mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005, + mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, + mmCGTS_CU6_SP0_CTRL_REG, 0xffffffff, 0x00010000, + mmCGTS_CU6_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, + mmCGTS_CU6_TA_CTRL_REG, 0xffffffff, 0x00040007, + mmCGTS_CU6_SP1_CTRL_REG, 0xffffffff, 0x00060005, + mmCGTS_CU6_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, + mmCGTS_CU7_SP0_CTRL_REG, 0xffffffff, 0x00010000, + mmCGTS_CU7_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, + mmCGTS_CU7_TA_CTRL_REG, 0xffffffff, 0x00040007, + mmCGTS_CU7_SP1_CTRL_REG, 0xffffffff, 0x00060005, + mmCGTS_CU7_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, + mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200, + mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100, + mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f, + mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001, +}; + +static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev); +static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev); +static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev); + +static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev) +{ + switch (adev->asic_type) { + case CHIP_TOPAZ: + amdgpu_program_register_sequence(adev, + iceland_mgcg_cgcg_init, + (const 
u32)ARRAY_SIZE(iceland_mgcg_cgcg_init)); + amdgpu_program_register_sequence(adev, + golden_settings_iceland_a11, + (const u32)ARRAY_SIZE(golden_settings_iceland_a11)); + amdgpu_program_register_sequence(adev, + iceland_golden_common_all, + (const u32)ARRAY_SIZE(iceland_golden_common_all)); + break; + case CHIP_TONGA: + amdgpu_program_register_sequence(adev, + tonga_mgcg_cgcg_init, + (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init)); + amdgpu_program_register_sequence(adev, + golden_settings_tonga_a11, + (const u32)ARRAY_SIZE(golden_settings_tonga_a11)); + amdgpu_program_register_sequence(adev, + tonga_golden_common_all, + (const u32)ARRAY_SIZE(tonga_golden_common_all)); + break; + case CHIP_CARRIZO: + amdgpu_program_register_sequence(adev, + cz_mgcg_cgcg_init, + (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init)); + amdgpu_program_register_sequence(adev, + cz_golden_settings_a11, + (const u32)ARRAY_SIZE(cz_golden_settings_a11)); + amdgpu_program_register_sequence(adev, + cz_golden_common_all, + (const u32)ARRAY_SIZE(cz_golden_common_all)); + break; + default: + break; + } +} + +static void gfx_v8_0_scratch_init(struct amdgpu_device *adev) +{ + int i; + + adev->gfx.scratch.num_reg = 7; + adev->gfx.scratch.reg_base = mmSCRATCH_REG0; + for (i = 0; i < adev->gfx.scratch.num_reg; i++) { + adev->gfx.scratch.free[i] = true; + adev->gfx.scratch.reg[i] = adev->gfx.scratch.reg_base + i; + } +} + +static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + uint32_t scratch; + uint32_t tmp = 0; + unsigned i; + int r; + + r = amdgpu_gfx_scratch_get(adev, &scratch); + if (r) { + DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r); + return r; + } + WREG32(scratch, 0xCAFEDEAD); + r = amdgpu_ring_lock(ring, 3); + if (r) { + DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", + ring->idx, r); + amdgpu_gfx_scratch_free(adev, scratch); + return r; + } + amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); + amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START)); + amdgpu_ring_write(ring, 0xDEADBEEF); + amdgpu_ring_unlock_commit(ring); + + for (i = 0; i < adev->usec_timeout; i++) { + tmp = RREG32(scratch); + if (tmp == 0xDEADBEEF) + break; + DRM_UDELAY(1); + } + if (i < adev->usec_timeout) { + DRM_INFO("ring test on %d succeeded in %d usecs\n", + ring->idx, i); + } else { + DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n", + ring->idx, scratch, tmp); + r = -EINVAL; + } + amdgpu_gfx_scratch_free(adev, scratch); + return r; +} + +static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + struct amdgpu_ib ib; + uint32_t scratch; + uint32_t tmp = 0; + unsigned i; + int r; + + r = amdgpu_gfx_scratch_get(adev, &scratch); + if (r) { + DRM_ERROR("amdgpu: failed to get scratch reg (%d).\n", r); + return r; + } + WREG32(scratch, 0xCAFEDEAD); + r = amdgpu_ib_get(ring, NULL, 256, &ib); + if (r) { + DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); + amdgpu_gfx_scratch_free(adev, scratch); + return r; + } + ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1); + ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START)); + ib.ptr[2] = 0xDEADBEEF; + ib.length_dw = 3; + r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED); + if (r) { + amdgpu_gfx_scratch_free(adev, scratch); + amdgpu_ib_free(adev, &ib); + DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r); + return r; + } + r = amdgpu_fence_wait(ib.fence, false); + if (r) { + DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); + 
amdgpu_gfx_scratch_free(adev, scratch); + amdgpu_ib_free(adev, &ib); + return r; + } + for (i = 0; i < adev->usec_timeout; i++) { + tmp = RREG32(scratch); + if (tmp == 0xDEADBEEF) + break; + DRM_UDELAY(1); + } + if (i < adev->usec_timeout) { + DRM_INFO("ib test on ring %d succeeded in %u usecs\n", + ib.fence->ring->idx, i); + } else { + DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n", + scratch, tmp); + r = -EINVAL; + } + amdgpu_gfx_scratch_free(adev, scratch); + amdgpu_ib_free(adev, &ib); + return r; +} + +static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) +{ + const char *chip_name; + char fw_name[30]; + int err; + struct amdgpu_firmware_info *info = NULL; + const struct common_firmware_header *header = NULL; + + DRM_DEBUG("\n"); + + switch (adev->asic_type) { + case CHIP_TOPAZ: + chip_name = "topaz"; + break; + case CHIP_TONGA: + chip_name = "tonga"; + break; + case CHIP_CARRIZO: + chip_name = "carrizo"; + break; + default: + BUG(); + } + + snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); + err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev); + if (err) + goto out; + err = amdgpu_ucode_validate(adev->gfx.pfp_fw); + if (err) + goto out; + + snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); + err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev); + if (err) + goto out; + err = amdgpu_ucode_validate(adev->gfx.me_fw); + if (err) + goto out; + + snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name); + err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev); + if (err) + goto out; + err = amdgpu_ucode_validate(adev->gfx.ce_fw); + if (err) + goto out; + + snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name); + err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev); + if (err) + goto out; + err = amdgpu_ucode_validate(adev->gfx.rlc_fw); + + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name); + err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev); + if (err) + goto out; + err = amdgpu_ucode_validate(adev->gfx.mec_fw); + if (err) + goto out; + + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec2.bin", chip_name); + err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev); + if (!err) { + err = amdgpu_ucode_validate(adev->gfx.mec2_fw); + if (err) + goto out; + } else { + err = 0; + adev->gfx.mec2_fw = NULL; + } + + if (adev->firmware.smu_load) { + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP]; + info->ucode_id = AMDGPU_UCODE_ID_CP_PFP; + info->fw = adev->gfx.pfp_fw; + header = (const struct common_firmware_header *)info->fw->data; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); + + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME]; + info->ucode_id = AMDGPU_UCODE_ID_CP_ME; + info->fw = adev->gfx.me_fw; + header = (const struct common_firmware_header *)info->fw->data; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); + + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE]; + info->ucode_id = AMDGPU_UCODE_ID_CP_CE; + info->fw = adev->gfx.ce_fw; + header = (const struct common_firmware_header *)info->fw->data; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); + + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G]; + info->ucode_id = AMDGPU_UCODE_ID_RLC_G; + info->fw = adev->gfx.rlc_fw; + header = (const struct common_firmware_header *)info->fw->data; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(header->ucode_size_bytes), 
PAGE_SIZE);
+
+		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
+		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
+		info->fw = adev->gfx.mec_fw;
+		header = (const struct common_firmware_header *)info->fw->data;
+		adev->firmware.fw_size +=
+			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+
+		if (adev->gfx.mec2_fw) {
+			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
+			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
+			info->fw = adev->gfx.mec2_fw;
+			header = (const struct common_firmware_header *)info->fw->data;
+			adev->firmware.fw_size +=
+				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+		}
+
+	}
+
+out:
+	if (err) {
+		dev_err(adev->dev,
+			"gfx8: Failed to load firmware \"%s\"\n",
+			fw_name);
+		release_firmware(adev->gfx.pfp_fw);
+		adev->gfx.pfp_fw = NULL;
+		release_firmware(adev->gfx.me_fw);
+		adev->gfx.me_fw = NULL;
+		release_firmware(adev->gfx.ce_fw);
+		adev->gfx.ce_fw = NULL;
+		release_firmware(adev->gfx.rlc_fw);
+		adev->gfx.rlc_fw = NULL;
+		release_firmware(adev->gfx.mec_fw);
+		adev->gfx.mec_fw = NULL;
+		release_firmware(adev->gfx.mec2_fw);
+		adev->gfx.mec2_fw = NULL;
+	}
+	return err;
+}
+
+static void gfx_v8_0_mec_fini(struct amdgpu_device *adev)
+{
+	int r;
+
+	if (adev->gfx.mec.hpd_eop_obj) {
+		r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
+		if (unlikely(r != 0))
+			dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
+		amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
+		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
+
+		amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj);
+		adev->gfx.mec.hpd_eop_obj = NULL;
+	}
+}
+
+#define MEC_HPD_SIZE 2048
+
+static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
+{
+	int r;
+	u32 *hpd;
+
+	/*
+	 * we assign only 1 pipe because all other pipes will
+	 * be handled by KFD
+	 */
+	adev->gfx.mec.num_mec = 1;
+	adev->gfx.mec.num_pipe = 1;
+	adev->gfx.mec.num_queue = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe * 8;
+
+	if (adev->gfx.mec.hpd_eop_obj == NULL) {
+		r = amdgpu_bo_create(adev,
+				     adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2,
+				     PAGE_SIZE, true,
+				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
+				     &adev->gfx.mec.hpd_eop_obj);
+		if (r) {
+			dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
+			return r;
+		}
+	}
+
+	r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
+	if (unlikely(r != 0)) {
+		gfx_v8_0_mec_fini(adev);
+		return r;
+	}
+	r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT,
+			  &adev->gfx.mec.hpd_eop_gpu_addr);
+	if (r) {
+		dev_warn(adev->dev, "(%d) pin HPD EOP bo failed\n", r);
+		gfx_v8_0_mec_fini(adev);
+		return r;
+	}
+	r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd);
+	if (r) {
+		dev_warn(adev->dev, "(%d) map HPD EOP bo failed\n", r);
+		gfx_v8_0_mec_fini(adev);
+		return r;
+	}
+
+	memset(hpd, 0, adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2);
+
+	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
+	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
+
+	return 0;
+}
+
+static int gfx_v8_0_sw_init(struct amdgpu_device *adev)
+{
+	int i, r;
+	struct amdgpu_ring *ring;
+
+	/* EOP Event */
+	r = amdgpu_irq_add_id(adev, 181, &adev->gfx.eop_irq);
+	if (r)
+		return r;
+
+	/* Privileged reg */
+	r = amdgpu_irq_add_id(adev, 184, &adev->gfx.priv_reg_irq);
+	if (r)
+		return r;
+
+	/* Privileged inst */
+	r = amdgpu_irq_add_id(adev, 185, &adev->gfx.priv_inst_irq);
+	if (r)
+		return r;
+
+	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
+
+	gfx_v8_0_scratch_init(adev);
+
+	r = gfx_v8_0_init_microcode(adev);
+	if (r) {
+		DRM_ERROR("Failed to load gfx
firmware!\n"); + return r; + } + + r = gfx_v8_0_mec_init(adev); + if (r) { + DRM_ERROR("Failed to init MEC BOs!\n"); + return r; + } + + r = amdgpu_wb_get(adev, &adev->gfx.ce_sync_offs); + if (r) { + DRM_ERROR("(%d) gfx.ce_sync_offs wb alloc failed\n", r); + return r; + } + + /* set up the gfx ring */ + for (i = 0; i < adev->gfx.num_gfx_rings; i++) { + ring = &adev->gfx.gfx_ring[i]; + ring->ring_obj = NULL; + sprintf(ring->name, "gfx"); + /* no gfx doorbells on iceland */ + if (adev->asic_type != CHIP_TOPAZ) { + ring->use_doorbell = true; + ring->doorbell_index = AMDGPU_DOORBELL_GFX_RING0; + } + + r = amdgpu_ring_init(adev, ring, 1024 * 1024, + PACKET3(PACKET3_NOP, 0x3FFF), 0xf, + &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP, + AMDGPU_RING_TYPE_GFX); + if (r) + return r; + } + + /* set up the compute queues */ + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + unsigned irq_type; + + /* max 32 queues per MEC */ + if ((i >= 32) || (i >= AMDGPU_MAX_COMPUTE_RINGS)) { + DRM_ERROR("Too many (%d) compute rings!\n", i); + break; + } + ring = &adev->gfx.compute_ring[i]; + ring->ring_obj = NULL; + ring->use_doorbell = true; + ring->doorbell_index = AMDGPU_DOORBELL_MEC_RING0 + i; + ring->me = 1; /* first MEC */ + ring->pipe = i / 8; + ring->queue = i % 8; + sprintf(ring->name, "comp %d.%d.%d", ring->me, ring->pipe, ring->queue); + irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe; + /* type-2 packets are deprecated on MEC, use type-3 instead */ + r = amdgpu_ring_init(adev, ring, 1024 * 1024, + PACKET3(PACKET3_NOP, 0x3FFF), 0xf, + &adev->gfx.eop_irq, irq_type, + AMDGPU_RING_TYPE_COMPUTE); + if (r) + return r; + } + + /* reserve GDS, GWS and OA resource for gfx */ + r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size, + PAGE_SIZE, true, + AMDGPU_GEM_DOMAIN_GDS, 0, + NULL, &adev->gds.gds_gfx_bo); + if (r) + return r; + + r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size, + PAGE_SIZE, true, + AMDGPU_GEM_DOMAIN_GWS, 0, + NULL, &adev->gds.gws_gfx_bo); + if (r) + return r; + + r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size, + PAGE_SIZE, true, + AMDGPU_GEM_DOMAIN_OA, 0, + NULL, &adev->gds.oa_gfx_bo); + if (r) + return r; + + return 0; +} + +static int gfx_v8_0_sw_fini(struct amdgpu_device *adev) +{ + int i; + + amdgpu_bo_unref(&adev->gds.oa_gfx_bo); + amdgpu_bo_unref(&adev->gds.gws_gfx_bo); + amdgpu_bo_unref(&adev->gds.gds_gfx_bo); + + for (i = 0; i < adev->gfx.num_gfx_rings; i++) + amdgpu_ring_fini(&adev->gfx.gfx_ring[i]); + for (i = 0; i < adev->gfx.num_compute_rings; i++) + amdgpu_ring_fini(&adev->gfx.compute_ring[i]); + + amdgpu_wb_free(adev, adev->gfx.ce_sync_offs); + + gfx_v8_0_mec_fini(adev); + + return 0; +} + +static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev) +{ + const u32 num_tile_mode_states = 32; + const u32 num_secondary_tile_mode_states = 16; + u32 reg_offset, gb_tile_moden, split_equal_to_row_size; + + switch (adev->gfx.config.mem_row_size_in_kb) { + case 1: + split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB; + break; + case 2: + default: + split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB; + break; + case 4: + split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB; + break; + } + + switch (adev->asic_type) { + case CHIP_TOPAZ: + for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { + switch (reg_offset) { + case 0: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 1: + 
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 2: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 3: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 4: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 5: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 6: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 8: + gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | + PIPE_CONFIG(ADDR_SURF_P2)); + break; + case 9: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 10: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 11: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + break; + case 13: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 14: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 15: + gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 16: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + break; + case 18: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 19: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 20: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 21: + gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 22: + gb_tile_moden = 
(ARRAY_MODE(ARRAY_PRT_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 24: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 25: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 26: + gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 27: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 28: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 29: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + break; + case 7: + case 12: + case 17: + case 23: + /* unused idx */ + continue; + default: + gb_tile_moden = 0; + break; + }; + adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden; + WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden); + } + for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) { + switch (reg_offset) { + case 0: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 1: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 2: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 3: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 4: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 5: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 6: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 8: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 9: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 10: + gb_tile_moden = 
(BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 11: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 12: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 13: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 14: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 7: + /* unused idx */ + continue; + default: + gb_tile_moden = 0; + break; + }; + adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden; + WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden); + } + case CHIP_TONGA: + for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { + switch (reg_offset) { + case 0: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 1: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 2: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 3: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 4: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 5: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 6: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 7: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 8: + gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16)); + break; + case 9: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 10: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 11: + gb_tile_moden = 
(ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + break; + case 12: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + break; + case 13: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 14: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 15: + gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 16: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + break; + case 17: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + break; + case 18: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 19: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 20: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 21: + gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 22: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 23: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 24: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 25: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 26: + gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 27: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + 
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 28: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 29: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + break; + case 30: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + break; + default: + gb_tile_moden = 0; + break; + }; + adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden; + WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden); + } + for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) { + switch (reg_offset) { + case 0: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 1: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 2: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 3: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 4: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 5: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 6: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 8: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 9: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 10: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 11: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 12: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 13: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_4_BANK)); + break; + case 14: + 
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_4_BANK)); + break; + case 7: + /* unused idx */ + continue; + default: + gb_tile_moden = 0; + break; + }; + adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden; + WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden); + } + break; + case CHIP_CARRIZO: + default: + for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { + switch (reg_offset) { + case 0: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 1: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 2: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 3: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 4: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 5: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 6: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); + break; + case 8: + gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | + PIPE_CONFIG(ADDR_SURF_P2)); + break; + case 9: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 10: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 11: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + break; + case 13: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 14: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 15: + gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 16: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + break; + case 18: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + 
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 19: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 20: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 21: + gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 22: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 24: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 25: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 26: + gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); + break; + case 27: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 28: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); + break; + case 29: + gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); + break; + case 7: + case 12: + case 17: + case 23: + /* unused idx */ + continue; + default: + gb_tile_moden = 0; + break; + }; + adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden; + WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden); + } + for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) { + switch (reg_offset) { + case 0: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 1: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 2: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 3: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 4: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 5: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + 
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 6: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 8: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 9: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 10: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 11: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 12: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 13: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 14: + gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 7: + /* unused idx */ + continue; + default: + gb_tile_moden = 0; + break; + }; + adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden; + WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden); + } + } +} + +static u32 gfx_v8_0_create_bitmask(u32 bit_width) +{ + u32 i, mask = 0; + + for (i = 0; i < bit_width; i++) { + mask <<= 1; + mask |= 1; + } + return mask; +} + +void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num) +{ + u32 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1); + + if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) { + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1); + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1); + } else if (se_num == 0xffffffff) { + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num); + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1); + } else if (sh_num == 0xffffffff) { + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1); + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); + } else { + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num); + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); + } + WREG32(mmGRBM_GFX_INDEX, data); +} + +static u32 gfx_v8_0_get_rb_disabled(struct amdgpu_device *adev, + u32 max_rb_num_per_se, + u32 sh_per_se) +{ + u32 data, mask; + + data = RREG32(mmCC_RB_BACKEND_DISABLE); + if (data & 1) + data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK; + else + data = 0; + + data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE); + + data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT; + + mask = gfx_v8_0_create_bitmask(max_rb_num_per_se / sh_per_se); + + return data & mask; +} + +static void gfx_v8_0_setup_rb(struct amdgpu_device *adev, + u32 se_num, u32 sh_per_se, + u32 
max_rb_num_per_se) +{ + int i, j; + u32 data, mask; + u32 disabled_rbs = 0; + u32 enabled_rbs = 0; + + mutex_lock(&adev->grbm_idx_mutex); + for (i = 0; i < se_num; i++) { + for (j = 0; j < sh_per_se; j++) { + gfx_v8_0_select_se_sh(adev, i, j); + data = gfx_v8_0_get_rb_disabled(adev, + max_rb_num_per_se, sh_per_se); + disabled_rbs |= data << ((i * sh_per_se + j) * + RB_BITMAP_WIDTH_PER_SH); + } + } + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); + + mask = 1; + for (i = 0; i < max_rb_num_per_se * se_num; i++) { + if (!(disabled_rbs & mask)) + enabled_rbs |= mask; + mask <<= 1; + } + + adev->gfx.config.backend_enable_mask = enabled_rbs; + + mutex_lock(&adev->grbm_idx_mutex); + for (i = 0; i < se_num; i++) { + gfx_v8_0_select_se_sh(adev, i, 0xffffffff); + data = 0; + for (j = 0; j < sh_per_se; j++) { + switch (enabled_rbs & 3) { + case 0: + if (j == 0) + data |= (RASTER_CONFIG_RB_MAP_3 << + PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT); + else + data |= (RASTER_CONFIG_RB_MAP_0 << + PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT); + break; + case 1: + data |= (RASTER_CONFIG_RB_MAP_0 << + (i * sh_per_se + j) * 2); + break; + case 2: + data |= (RASTER_CONFIG_RB_MAP_3 << + (i * sh_per_se + j) * 2); + break; + case 3: + default: + data |= (RASTER_CONFIG_RB_MAP_2 << + (i * sh_per_se + j) * 2); + break; + } + enabled_rbs >>= 2; + } + WREG32(mmPA_SC_RASTER_CONFIG, data); + } + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); +} + +static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) +{ + u32 gb_addr_config; + u32 mc_shared_chmap, mc_arb_ramcfg; + u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map; + u32 tmp; + int i; + + switch (adev->asic_type) { + case CHIP_TOPAZ: + adev->gfx.config.max_shader_engines = 1; + adev->gfx.config.max_tile_pipes = 2; + adev->gfx.config.max_cu_per_sh = 6; + adev->gfx.config.max_sh_per_se = 1; + adev->gfx.config.max_backends_per_se = 2; + adev->gfx.config.max_texture_channel_caches = 2; + adev->gfx.config.max_gprs = 256; + adev->gfx.config.max_gs_threads = 32; + adev->gfx.config.max_hw_contexts = 8; + + adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; + adev->gfx.config.sc_prim_fifo_size_backend = 0x100; + adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; + adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; + gb_addr_config = TOPAZ_GB_ADDR_CONFIG_GOLDEN; + break; + case CHIP_TONGA: + adev->gfx.config.max_shader_engines = 4; + adev->gfx.config.max_tile_pipes = 8; + adev->gfx.config.max_cu_per_sh = 8; + adev->gfx.config.max_sh_per_se = 1; + adev->gfx.config.max_backends_per_se = 2; + adev->gfx.config.max_texture_channel_caches = 8; + adev->gfx.config.max_gprs = 256; + adev->gfx.config.max_gs_threads = 32; + adev->gfx.config.max_hw_contexts = 8; + + adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; + adev->gfx.config.sc_prim_fifo_size_backend = 0x100; + adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; + adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; + gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN; + break; + case CHIP_CARRIZO: + adev->gfx.config.max_shader_engines = 1; + adev->gfx.config.max_tile_pipes = 2; + adev->gfx.config.max_cu_per_sh = 8; + adev->gfx.config.max_sh_per_se = 1; + adev->gfx.config.max_backends_per_se = 2; + adev->gfx.config.max_texture_channel_caches = 2; + adev->gfx.config.max_gprs = 256; + adev->gfx.config.max_gs_threads = 32; + adev->gfx.config.max_hw_contexts = 8; + + adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; + 
adev->gfx.config.sc_prim_fifo_size_backend = 0x100; + adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; + adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; + gb_addr_config = CARRIZO_GB_ADDR_CONFIG_GOLDEN; + break; + default: + adev->gfx.config.max_shader_engines = 2; + adev->gfx.config.max_tile_pipes = 4; + adev->gfx.config.max_cu_per_sh = 2; + adev->gfx.config.max_sh_per_se = 1; + adev->gfx.config.max_backends_per_se = 2; + adev->gfx.config.max_texture_channel_caches = 4; + adev->gfx.config.max_gprs = 256; + adev->gfx.config.max_gs_threads = 32; + adev->gfx.config.max_hw_contexts = 8; + + adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; + adev->gfx.config.sc_prim_fifo_size_backend = 0x100; + adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; + adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; + gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN; + break; + } + + tmp = RREG32(mmGRBM_CNTL); + tmp = REG_SET_FIELD(tmp, GRBM_CNTL, READ_TIMEOUT, 0xff); + WREG32(mmGRBM_CNTL, tmp); + + mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP); + adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG); + mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg; + + adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes; + adev->gfx.config.mem_max_burst_length_bytes = 256; + if (adev->flags & AMDGPU_IS_APU) { + /* Get memory bank mapping mode. */ + tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING); + dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP); + dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP); + + tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING); + dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP); + dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP); + + /* Validate settings in case only one DIMM is installed. */ + if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12)) + dimm00_addr_map = 0; + if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12)) + dimm01_addr_map = 0; + if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12)) + dimm10_addr_map = 0; + if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12)) + dimm11_addr_map = 0; + + /* If DIMM Addr map is 8GB, ROW size should be 2KB. Otherwise 1KB. */ + /* If ROW size(DIMM1) != ROW size(DIMM0), the larger ROW size should be used. 
*/ + if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11)) + adev->gfx.config.mem_row_size_in_kb = 2; + else + adev->gfx.config.mem_row_size_in_kb = 1; + } else { + tmp = REG_GET_FIELD(mc_arb_ramcfg, MC_ARB_RAMCFG, NOOFCOLS); + adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024; + if (adev->gfx.config.mem_row_size_in_kb > 4) + adev->gfx.config.mem_row_size_in_kb = 4; + } + + adev->gfx.config.shader_engine_tile_size = 32; + adev->gfx.config.num_gpus = 1; + adev->gfx.config.multi_gpu_tile_size = 64; + + /* fix up row size */ + switch (adev->gfx.config.mem_row_size_in_kb) { + case 1: + default: + gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 0); + break; + case 2: + gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 1); + break; + case 4: + gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 2); + break; + } + adev->gfx.config.gb_addr_config = gb_addr_config; + + WREG32(mmGB_ADDR_CONFIG, gb_addr_config); + WREG32(mmHDP_ADDR_CONFIG, gb_addr_config); + WREG32(mmDMIF_ADDR_CALC, gb_addr_config); + WREG32(mmSDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET, + gb_addr_config & 0x70); + WREG32(mmSDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET, + gb_addr_config & 0x70); + WREG32(mmUVD_UDEC_ADDR_CONFIG, gb_addr_config); + WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, gb_addr_config); + WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config); + + gfx_v8_0_tiling_mode_table_init(adev); + + gfx_v8_0_setup_rb(adev, adev->gfx.config.max_shader_engines, + adev->gfx.config.max_sh_per_se, + adev->gfx.config.max_backends_per_se); + + /* XXX SH_MEM regs */ + /* where to put LDS, scratch, GPUVM in FSA64 space */ + mutex_lock(&adev->srbm_mutex); + for (i = 0; i < 16; i++) { + vi_srbm_select(adev, 0, 0, 0, i); + /* CP and shaders */ + if (i == 0) { + tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_UC); + tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_UC); + WREG32(mmSH_MEM_CONFIG, tmp); + } else { + tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_NC); + tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_NC); + WREG32(mmSH_MEM_CONFIG, tmp); + } + + WREG32(mmSH_MEM_APE1_BASE, 1); + WREG32(mmSH_MEM_APE1_LIMIT, 0); + WREG32(mmSH_MEM_BASES, 0); + } + vi_srbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + + mutex_lock(&adev->grbm_idx_mutex); + /* + * making sure that the following register writes will be broadcasted + * to all the shaders + */ + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + + WREG32(mmPA_SC_FIFO_SIZE, + (adev->gfx.config.sc_prim_fifo_size_frontend << + PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) | + (adev->gfx.config.sc_prim_fifo_size_backend << + PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) | + (adev->gfx.config.sc_hiz_tile_fifo_size << + PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) | + (adev->gfx.config.sc_earlyz_tile_fifo_size << + PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT)); + mutex_unlock(&adev->grbm_idx_mutex); + +} + +static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev) +{ + u32 i, j, k; + u32 mask; + + mutex_lock(&adev->grbm_idx_mutex); + for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { + for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { + gfx_v8_0_select_se_sh(adev, i, j); + for (k = 0; k < adev->usec_timeout; k++) { + if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0) + break; + udelay(1); + } + } + } + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + 
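+ /* Added note (derived from gfx_v8_0_select_se_sh() above): passing 0xffffffff for both se_num and sh_num re-enables the GRBM_GFX_INDEX broadcast bits, so the SERDES busy polls that follow are no longer pinned to the last SE/SH pair selected by the loop. */ 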
mutex_unlock(&adev->grbm_idx_mutex); + + mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK | + RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK | + RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK | + RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK; + for (k = 0; k < adev->usec_timeout; k++) { + if ((RREG32(mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0) + break; + udelay(1); + } +} + +static void gfx_v8_0_enable_gui_idle_interrupt(struct amdgpu_device *adev, + bool enable) +{ + u32 tmp = RREG32(mmCP_INT_CNTL_RING0); + + if (enable) { + tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, 1); + tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, 1); + tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, 1); + tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, 1); + } else { + tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, 0); + tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, 0); + tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, 0); + tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, 0); + } + WREG32(mmCP_INT_CNTL_RING0, tmp); +} + +void gfx_v8_0_rlc_stop(struct amdgpu_device *adev) +{ + u32 tmp = RREG32(mmRLC_CNTL); + + tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0); + WREG32(mmRLC_CNTL, tmp); + + gfx_v8_0_enable_gui_idle_interrupt(adev, false); + + gfx_v8_0_wait_for_rlc_serdes(adev); +} + +static void gfx_v8_0_rlc_reset(struct amdgpu_device *adev) +{ + u32 tmp = RREG32(mmGRBM_SOFT_RESET); + + tmp = REG_SET_FIELD(tmp, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); + WREG32(mmGRBM_SOFT_RESET, tmp); + udelay(50); + tmp = REG_SET_FIELD(tmp, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0); + WREG32(mmGRBM_SOFT_RESET, tmp); + udelay(50); +} + +static void gfx_v8_0_rlc_start(struct amdgpu_device *adev) +{ + u32 tmp = RREG32(mmRLC_CNTL); + + tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 1); + WREG32(mmRLC_CNTL, tmp); + + /* carrizo do enable cp interrupt after cp inited */ + if (adev->asic_type != CHIP_CARRIZO) + gfx_v8_0_enable_gui_idle_interrupt(adev, true); + + udelay(50); +} + +static int gfx_v8_0_rlc_load_microcode(struct amdgpu_device *adev) +{ + const struct rlc_firmware_header_v2_0 *hdr; + const __le32 *fw_data; + unsigned i, fw_size; + + if (!adev->gfx.rlc_fw) + return -EINVAL; + + hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; + amdgpu_ucode_print_rlc_hdr(&hdr->header); + adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version); + + fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; + + WREG32(mmRLC_GPM_UCODE_ADDR, 0); + for (i = 0; i < fw_size; i++) + WREG32(mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++)); + WREG32(mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version); + + return 0; +} + +static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev) +{ + int r; + + gfx_v8_0_rlc_stop(adev); + + /* disable CG */ + WREG32(mmRLC_CGCG_CGLS_CTRL, 0); + + /* disable PG */ + WREG32(mmRLC_PG_CNTL, 0); + + gfx_v8_0_rlc_reset(adev); + + if (!adev->firmware.smu_load) { + /* legacy rlc firmware loading */ + r = gfx_v8_0_rlc_load_microcode(adev); + if (r) + return r; + } else { + r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, + AMDGPU_UCODE_ID_RLC_G); + if (r) + return -EINVAL; + } + + gfx_v8_0_rlc_start(adev); + + return 0; +} + +static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) +{ + int i; + u32 tmp = 
RREG32(mmCP_ME_CNTL); + + if (enable) { + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 0); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 0); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 0); + } else { + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1); + for (i = 0; i < adev->gfx.num_gfx_rings; i++) + adev->gfx.gfx_ring[i].ready = false; + } + WREG32(mmCP_ME_CNTL, tmp); + udelay(50); +} + +static int gfx_v8_0_cp_gfx_load_microcode(struct amdgpu_device *adev) +{ + const struct gfx_firmware_header_v1_0 *pfp_hdr; + const struct gfx_firmware_header_v1_0 *ce_hdr; + const struct gfx_firmware_header_v1_0 *me_hdr; + const __le32 *fw_data; + unsigned i, fw_size; + + if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw) + return -EINVAL; + + pfp_hdr = (const struct gfx_firmware_header_v1_0 *) + adev->gfx.pfp_fw->data; + ce_hdr = (const struct gfx_firmware_header_v1_0 *) + adev->gfx.ce_fw->data; + me_hdr = (const struct gfx_firmware_header_v1_0 *) + adev->gfx.me_fw->data; + + amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header); + amdgpu_ucode_print_gfx_hdr(&ce_hdr->header); + amdgpu_ucode_print_gfx_hdr(&me_hdr->header); + adev->gfx.pfp_fw_version = le32_to_cpu(pfp_hdr->header.ucode_version); + adev->gfx.ce_fw_version = le32_to_cpu(ce_hdr->header.ucode_version); + adev->gfx.me_fw_version = le32_to_cpu(me_hdr->header.ucode_version); + + gfx_v8_0_cp_gfx_enable(adev, false); + + /* PFP */ + fw_data = (const __le32 *) + (adev->gfx.pfp_fw->data + + le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes)); + fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4; + WREG32(mmCP_PFP_UCODE_ADDR, 0); + for (i = 0; i < fw_size; i++) + WREG32(mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++)); + WREG32(mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version); + + /* CE */ + fw_data = (const __le32 *) + (adev->gfx.ce_fw->data + + le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes)); + fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4; + WREG32(mmCP_CE_UCODE_ADDR, 0); + for (i = 0; i < fw_size; i++) + WREG32(mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++)); + WREG32(mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version); + + /* ME */ + fw_data = (const __le32 *) + (adev->gfx.me_fw->data + + le32_to_cpu(me_hdr->header.ucode_array_offset_bytes)); + fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4; + WREG32(mmCP_ME_RAM_WADDR, 0); + for (i = 0; i < fw_size; i++) + WREG32(mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++)); + WREG32(mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version); + + return 0; +} + +static u32 gfx_v8_0_get_csb_size(struct amdgpu_device *adev) +{ + u32 count = 0; + const struct cs_section_def *sect = NULL; + const struct cs_extent_def *ext = NULL; + + /* begin clear state */ + count += 2; + /* context control state */ + count += 3; + + for (sect = vi_cs_data; sect->section != NULL; ++sect) { + for (ext = sect->section; ext->extent != NULL; ++ext) { + if (sect->id == SECT_CONTEXT) + count += 2 + ext->reg_count; + else + return 0; + } + } + /* pa_sc_raster_config/pa_sc_raster_config1 */ + count += 4; + /* end clear state */ + count += 2; + /* clear state */ + count += 2; + + return count; +} + +static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0]; + const struct cs_section_def *sect = NULL; + const struct cs_extent_def *ext = NULL; + int r, i; + + /* init the CP */ + WREG32(mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1); + 
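+ /* Added note on the ring reservation below: amdgpu_ring_lock() asks for gfx_v8_0_get_csb_size(adev) + 4 dwords; the extra four appear to cover the PACKET3_SET_BASE packet for the CE partitions (one header plus three data words), which gfx_v8_0_get_csb_size() does not count. */ 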
WREG32(mmCP_ENDIAN_SWAP, 0); + WREG32(mmCP_DEVICE_ID, 1); + + gfx_v8_0_cp_gfx_enable(adev, true); + + r = amdgpu_ring_lock(ring, gfx_v8_0_get_csb_size(adev) + 4); + if (r) { + DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); + return r; + } + + /* clear state buffer */ + amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); + amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE); + + amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1)); + amdgpu_ring_write(ring, 0x80000000); + amdgpu_ring_write(ring, 0x80000000); + + for (sect = vi_cs_data; sect->section != NULL; ++sect) { + for (ext = sect->section; ext->extent != NULL; ++ext) { + if (sect->id == SECT_CONTEXT) { + amdgpu_ring_write(ring, + PACKET3(PACKET3_SET_CONTEXT_REG, + ext->reg_count)); + amdgpu_ring_write(ring, + ext->reg_index - PACKET3_SET_CONTEXT_REG_START); + for (i = 0; i < ext->reg_count; i++) + amdgpu_ring_write(ring, ext->extent[i]); + } + } + } + + amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); + amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START); + switch (adev->asic_type) { + case CHIP_TONGA: + amdgpu_ring_write(ring, 0x16000012); + amdgpu_ring_write(ring, 0x0000002A); + break; + case CHIP_TOPAZ: + case CHIP_CARRIZO: + amdgpu_ring_write(ring, 0x00000002); + amdgpu_ring_write(ring, 0x00000000); + break; + default: + BUG(); + } + + amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); + amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE); + + amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0)); + amdgpu_ring_write(ring, 0); + + /* init the CE partitions */ + amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2)); + amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE)); + amdgpu_ring_write(ring, 0x8000); + amdgpu_ring_write(ring, 0x8000); + + amdgpu_ring_unlock_commit(ring); + + return 0; +} + +static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring; + u32 tmp; + u32 rb_bufsz; + u64 rb_addr, rptr_addr; + int r; + + /* Set the write pointer delay */ + WREG32(mmCP_RB_WPTR_DELAY, 0); + + /* set the RB to use vmid 0 */ + WREG32(mmCP_RB_VMID, 0); + + /* Set ring buffer size */ + ring = &adev->gfx.gfx_ring[0]; + rb_bufsz = order_base_2(ring->ring_size / 8); + tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz); + tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2); + tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, MTYPE, 3); + tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, MIN_IB_AVAILSZ, 1); +#ifdef __BIG_ENDIAN + tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1); +#endif + WREG32(mmCP_RB0_CNTL, tmp); + + /* Initialize the ring buffer's read and write pointers */ + WREG32(mmCP_RB0_CNTL, tmp | CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK); + ring->wptr = 0; + WREG32(mmCP_RB0_WPTR, ring->wptr); + + /* set the wb address wether it's enabled or not */ + rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4); + WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr)); + WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF); + + mdelay(1); + WREG32(mmCP_RB0_CNTL, tmp); + + rb_addr = ring->gpu_addr >> 8; + WREG32(mmCP_RB0_BASE, rb_addr); + WREG32(mmCP_RB0_BASE_HI, upper_32_bits(rb_addr)); + + /* no gfx doorbells on iceland */ + if (adev->asic_type != CHIP_TOPAZ) { + tmp = RREG32(mmCP_RB_DOORBELL_CONTROL); + if (ring->use_doorbell) { + tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, + DOORBELL_OFFSET, ring->doorbell_index); + tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, + DOORBELL_EN, 1); + } else { + tmp = 
REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, + DOORBELL_EN, 0); + } + WREG32(mmCP_RB_DOORBELL_CONTROL, tmp); + + if (adev->asic_type == CHIP_TONGA) { + tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER, + DOORBELL_RANGE_LOWER, + AMDGPU_DOORBELL_GFX_RING0); + WREG32(mmCP_RB_DOORBELL_RANGE_LOWER, tmp); + + WREG32(mmCP_RB_DOORBELL_RANGE_UPPER, + CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK); + } + + } + + /* start the ring */ + gfx_v8_0_cp_gfx_start(adev); + ring->ready = true; + r = amdgpu_ring_test_ring(ring); + if (r) { + ring->ready = false; + return r; + } + + return 0; +} + +static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) +{ + int i; + + if (enable) { + WREG32(mmCP_MEC_CNTL, 0); + } else { + WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK)); + for (i = 0; i < adev->gfx.num_compute_rings; i++) + adev->gfx.compute_ring[i].ready = false; + } + udelay(50); +} + +static int gfx_v8_0_cp_compute_start(struct amdgpu_device *adev) +{ + gfx_v8_0_cp_compute_enable(adev, true); + + return 0; +} + +static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev) +{ + const struct gfx_firmware_header_v1_0 *mec_hdr; + const __le32 *fw_data; + unsigned i, fw_size; + + if (!adev->gfx.mec_fw) + return -EINVAL; + + gfx_v8_0_cp_compute_enable(adev, false); + + mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; + amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); + adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version); + + fw_data = (const __le32 *) + (adev->gfx.mec_fw->data + + le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes)); + fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4; + + /* MEC1 */ + WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0); + for (i = 0; i < fw_size; i++) + WREG32(mmCP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data+i)); + WREG32(mmCP_MEC_ME1_UCODE_ADDR, adev->gfx.mec_fw_version); + + /* Loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. 
*/ + if (adev->gfx.mec2_fw) { + const struct gfx_firmware_header_v1_0 *mec2_hdr; + + mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data; + amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header); + adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version); + + fw_data = (const __le32 *) + (adev->gfx.mec2_fw->data + + le32_to_cpu(mec2_hdr->header.ucode_array_offset_bytes)); + fw_size = le32_to_cpu(mec2_hdr->header.ucode_size_bytes) / 4; + + WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0); + for (i = 0; i < fw_size; i++) + WREG32(mmCP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data+i)); + WREG32(mmCP_MEC_ME2_UCODE_ADDR, adev->gfx.mec2_fw_version); + } + + return 0; +} + +struct vi_mqd { + uint32_t header; /* ordinal0 */ + uint32_t compute_dispatch_initiator; /* ordinal1 */ + uint32_t compute_dim_x; /* ordinal2 */ + uint32_t compute_dim_y; /* ordinal3 */ + uint32_t compute_dim_z; /* ordinal4 */ + uint32_t compute_start_x; /* ordinal5 */ + uint32_t compute_start_y; /* ordinal6 */ + uint32_t compute_start_z; /* ordinal7 */ + uint32_t compute_num_thread_x; /* ordinal8 */ + uint32_t compute_num_thread_y; /* ordinal9 */ + uint32_t compute_num_thread_z; /* ordinal10 */ + uint32_t compute_pipelinestat_enable; /* ordinal11 */ + uint32_t compute_perfcount_enable; /* ordinal12 */ + uint32_t compute_pgm_lo; /* ordinal13 */ + uint32_t compute_pgm_hi; /* ordinal14 */ + uint32_t compute_tba_lo; /* ordinal15 */ + uint32_t compute_tba_hi; /* ordinal16 */ + uint32_t compute_tma_lo; /* ordinal17 */ + uint32_t compute_tma_hi; /* ordinal18 */ + uint32_t compute_pgm_rsrc1; /* ordinal19 */ + uint32_t compute_pgm_rsrc2; /* ordinal20 */ + uint32_t compute_vmid; /* ordinal21 */ + uint32_t compute_resource_limits; /* ordinal22 */ + uint32_t compute_static_thread_mgmt_se0; /* ordinal23 */ + uint32_t compute_static_thread_mgmt_se1; /* ordinal24 */ + uint32_t compute_tmpring_size; /* ordinal25 */ + uint32_t compute_static_thread_mgmt_se2; /* ordinal26 */ + uint32_t compute_static_thread_mgmt_se3; /* ordinal27 */ + uint32_t compute_restart_x; /* ordinal28 */ + uint32_t compute_restart_y; /* ordinal29 */ + uint32_t compute_restart_z; /* ordinal30 */ + uint32_t compute_thread_trace_enable; /* ordinal31 */ + uint32_t compute_misc_reserved; /* ordinal32 */ + uint32_t compute_dispatch_id; /* ordinal33 */ + uint32_t compute_threadgroup_id; /* ordinal34 */ + uint32_t compute_relaunch; /* ordinal35 */ + uint32_t compute_wave_restore_addr_lo; /* ordinal36 */ + uint32_t compute_wave_restore_addr_hi; /* ordinal37 */ + uint32_t compute_wave_restore_control; /* ordinal38 */ + uint32_t reserved9; /* ordinal39 */ + uint32_t reserved10; /* ordinal40 */ + uint32_t reserved11; /* ordinal41 */ + uint32_t reserved12; /* ordinal42 */ + uint32_t reserved13; /* ordinal43 */ + uint32_t reserved14; /* ordinal44 */ + uint32_t reserved15; /* ordinal45 */ + uint32_t reserved16; /* ordinal46 */ + uint32_t reserved17; /* ordinal47 */ + uint32_t reserved18; /* ordinal48 */ + uint32_t reserved19; /* ordinal49 */ + uint32_t reserved20; /* ordinal50 */ + uint32_t reserved21; /* ordinal51 */ + uint32_t reserved22; /* ordinal52 */ + uint32_t reserved23; /* ordinal53 */ + uint32_t reserved24; /* ordinal54 */ + uint32_t reserved25; /* ordinal55 */ + uint32_t reserved26; /* ordinal56 */ + uint32_t reserved27; /* ordinal57 */ + uint32_t reserved28; /* ordinal58 */ + uint32_t reserved29; /* ordinal59 */ + uint32_t reserved30; /* ordinal60 */ + uint32_t reserved31; /* ordinal61 */ + uint32_t reserved32; /* ordinal62 */ + uint32_t reserved33; /* ordinal63 
*/ + uint32_t reserved34; /* ordinal64 */ + uint32_t compute_user_data_0; /* ordinal65 */ + uint32_t compute_user_data_1; /* ordinal66 */ + uint32_t compute_user_data_2; /* ordinal67 */ + uint32_t compute_user_data_3; /* ordinal68 */ + uint32_t compute_user_data_4; /* ordinal69 */ + uint32_t compute_user_data_5; /* ordinal70 */ + uint32_t compute_user_data_6; /* ordinal71 */ + uint32_t compute_user_data_7; /* ordinal72 */ + uint32_t compute_user_data_8; /* ordinal73 */ + uint32_t compute_user_data_9; /* ordinal74 */ + uint32_t compute_user_data_10; /* ordinal75 */ + uint32_t compute_user_data_11; /* ordinal76 */ + uint32_t compute_user_data_12; /* ordinal77 */ + uint32_t compute_user_data_13; /* ordinal78 */ + uint32_t compute_user_data_14; /* ordinal79 */ + uint32_t compute_user_data_15; /* ordinal80 */ + uint32_t cp_compute_csinvoc_count_lo; /* ordinal81 */ + uint32_t cp_compute_csinvoc_count_hi; /* ordinal82 */ + uint32_t reserved35; /* ordinal83 */ + uint32_t reserved36; /* ordinal84 */ + uint32_t reserved37; /* ordinal85 */ + uint32_t cp_mqd_query_time_lo; /* ordinal86 */ + uint32_t cp_mqd_query_time_hi; /* ordinal87 */ + uint32_t cp_mqd_connect_start_time_lo; /* ordinal88 */ + uint32_t cp_mqd_connect_start_time_hi; /* ordinal89 */ + uint32_t cp_mqd_connect_end_time_lo; /* ordinal90 */ + uint32_t cp_mqd_connect_end_time_hi; /* ordinal91 */ + uint32_t cp_mqd_connect_end_wf_count; /* ordinal92 */ + uint32_t cp_mqd_connect_end_pq_rptr; /* ordinal93 */ + uint32_t cp_mqd_connect_end_pq_wptr; /* ordinal94 */ + uint32_t cp_mqd_connect_end_ib_rptr; /* ordinal95 */ + uint32_t reserved38; /* ordinal96 */ + uint32_t reserved39; /* ordinal97 */ + uint32_t cp_mqd_save_start_time_lo; /* ordinal98 */ + uint32_t cp_mqd_save_start_time_hi; /* ordinal99 */ + uint32_t cp_mqd_save_end_time_lo; /* ordinal100 */ + uint32_t cp_mqd_save_end_time_hi; /* ordinal101 */ + uint32_t cp_mqd_restore_start_time_lo; /* ordinal102 */ + uint32_t cp_mqd_restore_start_time_hi; /* ordinal103 */ + uint32_t cp_mqd_restore_end_time_lo; /* ordinal104 */ + uint32_t cp_mqd_restore_end_time_hi; /* ordinal105 */ + uint32_t reserved40; /* ordinal106 */ + uint32_t reserved41; /* ordinal107 */ + uint32_t gds_cs_ctxsw_cnt0; /* ordinal108 */ + uint32_t gds_cs_ctxsw_cnt1; /* ordinal109 */ + uint32_t gds_cs_ctxsw_cnt2; /* ordinal110 */ + uint32_t gds_cs_ctxsw_cnt3; /* ordinal111 */ + uint32_t reserved42; /* ordinal112 */ + uint32_t reserved43; /* ordinal113 */ + uint32_t cp_pq_exe_status_lo; /* ordinal114 */ + uint32_t cp_pq_exe_status_hi; /* ordinal115 */ + uint32_t cp_packet_id_lo; /* ordinal116 */ + uint32_t cp_packet_id_hi; /* ordinal117 */ + uint32_t cp_packet_exe_status_lo; /* ordinal118 */ + uint32_t cp_packet_exe_status_hi; /* ordinal119 */ + uint32_t gds_save_base_addr_lo; /* ordinal120 */ + uint32_t gds_save_base_addr_hi; /* ordinal121 */ + uint32_t gds_save_mask_lo; /* ordinal122 */ + uint32_t gds_save_mask_hi; /* ordinal123 */ + uint32_t ctx_save_base_addr_lo; /* ordinal124 */ + uint32_t ctx_save_base_addr_hi; /* ordinal125 */ + uint32_t reserved44; /* ordinal126 */ + uint32_t reserved45; /* ordinal127 */ + uint32_t cp_mqd_base_addr_lo; /* ordinal128 */ + uint32_t cp_mqd_base_addr_hi; /* ordinal129 */ + uint32_t cp_hqd_active; /* ordinal130 */ + uint32_t cp_hqd_vmid; /* ordinal131 */ + uint32_t cp_hqd_persistent_state; /* ordinal132 */ + uint32_t cp_hqd_pipe_priority; /* ordinal133 */ + uint32_t cp_hqd_queue_priority; /* ordinal134 */ + uint32_t cp_hqd_quantum; /* ordinal135 */ + uint32_t cp_hqd_pq_base_lo; /* ordinal136 */ + 
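+ /* Added note: the cp_hqd_* fields here and below mirror the mmCP_HQD_* register block; gfx_v8_0_cp_compute_resume() stores into the MQD the same values it programs into those registers, presumably so the CP can reload the queue state from memory. */ 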
uint32_t cp_hqd_pq_base_hi; /* ordinal137 */ + uint32_t cp_hqd_pq_rptr; /* ordinal138 */ + uint32_t cp_hqd_pq_rptr_report_addr_lo; /* ordinal139 */ + uint32_t cp_hqd_pq_rptr_report_addr_hi; /* ordinal140 */ + uint32_t cp_hqd_pq_wptr_poll_addr; /* ordinal141 */ + uint32_t cp_hqd_pq_wptr_poll_addr_hi; /* ordinal142 */ + uint32_t cp_hqd_pq_doorbell_control; /* ordinal143 */ + uint32_t cp_hqd_pq_wptr; /* ordinal144 */ + uint32_t cp_hqd_pq_control; /* ordinal145 */ + uint32_t cp_hqd_ib_base_addr_lo; /* ordinal146 */ + uint32_t cp_hqd_ib_base_addr_hi; /* ordinal147 */ + uint32_t cp_hqd_ib_rptr; /* ordinal148 */ + uint32_t cp_hqd_ib_control; /* ordinal149 */ + uint32_t cp_hqd_iq_timer; /* ordinal150 */ + uint32_t cp_hqd_iq_rptr; /* ordinal151 */ + uint32_t cp_hqd_dequeue_request; /* ordinal152 */ + uint32_t cp_hqd_dma_offload; /* ordinal153 */ + uint32_t cp_hqd_sema_cmd; /* ordinal154 */ + uint32_t cp_hqd_msg_type; /* ordinal155 */ + uint32_t cp_hqd_atomic0_preop_lo; /* ordinal156 */ + uint32_t cp_hqd_atomic0_preop_hi; /* ordinal157 */ + uint32_t cp_hqd_atomic1_preop_lo; /* ordinal158 */ + uint32_t cp_hqd_atomic1_preop_hi; /* ordinal159 */ + uint32_t cp_hqd_hq_status0; /* ordinal160 */ + uint32_t cp_hqd_hq_control0; /* ordinal161 */ + uint32_t cp_mqd_control; /* ordinal162 */ + uint32_t cp_hqd_hq_status1; /* ordinal163 */ + uint32_t cp_hqd_hq_control1; /* ordinal164 */ + uint32_t cp_hqd_eop_base_addr_lo; /* ordinal165 */ + uint32_t cp_hqd_eop_base_addr_hi; /* ordinal166 */ + uint32_t cp_hqd_eop_control; /* ordinal167 */ + uint32_t cp_hqd_eop_rptr; /* ordinal168 */ + uint32_t cp_hqd_eop_wptr; /* ordinal169 */ + uint32_t cp_hqd_eop_done_events; /* ordinal170 */ + uint32_t cp_hqd_ctx_save_base_addr_lo; /* ordinal171 */ + uint32_t cp_hqd_ctx_save_base_addr_hi; /* ordinal172 */ + uint32_t cp_hqd_ctx_save_control; /* ordinal173 */ + uint32_t cp_hqd_cntl_stack_offset; /* ordinal174 */ + uint32_t cp_hqd_cntl_stack_size; /* ordinal175 */ + uint32_t cp_hqd_wg_state_offset; /* ordinal176 */ + uint32_t cp_hqd_ctx_save_size; /* ordinal177 */ + uint32_t cp_hqd_gds_resource_state; /* ordinal178 */ + uint32_t cp_hqd_error; /* ordinal179 */ + uint32_t cp_hqd_eop_wptr_mem; /* ordinal180 */ + uint32_t cp_hqd_eop_dones; /* ordinal181 */ + uint32_t reserved46; /* ordinal182 */ + uint32_t reserved47; /* ordinal183 */ + uint32_t reserved48; /* ordinal184 */ + uint32_t reserved49; /* ordinal185 */ + uint32_t reserved50; /* ordinal186 */ + uint32_t reserved51; /* ordinal187 */ + uint32_t reserved52; /* ordinal188 */ + uint32_t reserved53; /* ordinal189 */ + uint32_t reserved54; /* ordinal190 */ + uint32_t reserved55; /* ordinal191 */ + uint32_t iqtimer_pkt_header; /* ordinal192 */ + uint32_t iqtimer_pkt_dw0; /* ordinal193 */ + uint32_t iqtimer_pkt_dw1; /* ordinal194 */ + uint32_t iqtimer_pkt_dw2; /* ordinal195 */ + uint32_t iqtimer_pkt_dw3; /* ordinal196 */ + uint32_t iqtimer_pkt_dw4; /* ordinal197 */ + uint32_t iqtimer_pkt_dw5; /* ordinal198 */ + uint32_t iqtimer_pkt_dw6; /* ordinal199 */ + uint32_t iqtimer_pkt_dw7; /* ordinal200 */ + uint32_t iqtimer_pkt_dw8; /* ordinal201 */ + uint32_t iqtimer_pkt_dw9; /* ordinal202 */ + uint32_t iqtimer_pkt_dw10; /* ordinal203 */ + uint32_t iqtimer_pkt_dw11; /* ordinal204 */ + uint32_t iqtimer_pkt_dw12; /* ordinal205 */ + uint32_t iqtimer_pkt_dw13; /* ordinal206 */ + uint32_t iqtimer_pkt_dw14; /* ordinal207 */ + uint32_t iqtimer_pkt_dw15; /* ordinal208 */ + uint32_t iqtimer_pkt_dw16; /* ordinal209 */ + uint32_t iqtimer_pkt_dw17; /* ordinal210 */ + uint32_t iqtimer_pkt_dw18; /* 
ordinal211 */ + uint32_t iqtimer_pkt_dw19; /* ordinal212 */ + uint32_t iqtimer_pkt_dw20; /* ordinal213 */ + uint32_t iqtimer_pkt_dw21; /* ordinal214 */ + uint32_t iqtimer_pkt_dw22; /* ordinal215 */ + uint32_t iqtimer_pkt_dw23; /* ordinal216 */ + uint32_t iqtimer_pkt_dw24; /* ordinal217 */ + uint32_t iqtimer_pkt_dw25; /* ordinal218 */ + uint32_t iqtimer_pkt_dw26; /* ordinal219 */ + uint32_t iqtimer_pkt_dw27; /* ordinal220 */ + uint32_t iqtimer_pkt_dw28; /* ordinal221 */ + uint32_t iqtimer_pkt_dw29; /* ordinal222 */ + uint32_t iqtimer_pkt_dw30; /* ordinal223 */ + uint32_t iqtimer_pkt_dw31; /* ordinal224 */ + uint32_t reserved56; /* ordinal225 */ + uint32_t reserved57; /* ordinal226 */ + uint32_t reserved58; /* ordinal227 */ + uint32_t set_resources_header; /* ordinal228 */ + uint32_t set_resources_dw1; /* ordinal229 */ + uint32_t set_resources_dw2; /* ordinal230 */ + uint32_t set_resources_dw3; /* ordinal231 */ + uint32_t set_resources_dw4; /* ordinal232 */ + uint32_t set_resources_dw5; /* ordinal233 */ + uint32_t set_resources_dw6; /* ordinal234 */ + uint32_t set_resources_dw7; /* ordinal235 */ + uint32_t reserved59; /* ordinal236 */ + uint32_t reserved60; /* ordinal237 */ + uint32_t reserved61; /* ordinal238 */ + uint32_t reserved62; /* ordinal239 */ + uint32_t reserved63; /* ordinal240 */ + uint32_t reserved64; /* ordinal241 */ + uint32_t reserved65; /* ordinal242 */ + uint32_t reserved66; /* ordinal243 */ + uint32_t reserved67; /* ordinal244 */ + uint32_t reserved68; /* ordinal245 */ + uint32_t reserved69; /* ordinal246 */ + uint32_t reserved70; /* ordinal247 */ + uint32_t reserved71; /* ordinal248 */ + uint32_t reserved72; /* ordinal249 */ + uint32_t reserved73; /* ordinal250 */ + uint32_t reserved74; /* ordinal251 */ + uint32_t reserved75; /* ordinal252 */ + uint32_t reserved76; /* ordinal253 */ + uint32_t reserved77; /* ordinal254 */ + uint32_t reserved78; /* ordinal255 */ + + uint32_t reserved_t[256]; /* Reserve 256 dword buffer used by ucode */ +}; + +static void gfx_v8_0_cp_compute_fini(struct amdgpu_device *adev) +{ + int i, r; + + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + struct amdgpu_ring *ring = &adev->gfx.compute_ring[i]; + + if (ring->mqd_obj) { + r = amdgpu_bo_reserve(ring->mqd_obj, false); + if (unlikely(r != 0)) + dev_warn(adev->dev, "(%d) reserve MQD bo failed\n", r); + + amdgpu_bo_unpin(ring->mqd_obj); + amdgpu_bo_unreserve(ring->mqd_obj); + + amdgpu_bo_unref(&ring->mqd_obj); + ring->mqd_obj = NULL; + } + } +} + +static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev) +{ + int r, i, j; + u32 tmp; + bool use_doorbell = true; + u64 hqd_gpu_addr; + u64 mqd_gpu_addr; + u64 eop_gpu_addr; + u64 wb_gpu_addr; + u32 *buf; + struct vi_mqd *mqd; + + /* init the pipes */ + mutex_lock(&adev->srbm_mutex); + for (i = 0; i < (adev->gfx.mec.num_pipe * adev->gfx.mec.num_mec); i++) { + int me = (i < 4) ? 1 : 2; + int pipe = (i < 4) ? 
i : (i - 4); + + eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (i * MEC_HPD_SIZE); + eop_gpu_addr >>= 8; + + vi_srbm_select(adev, me, pipe, 0, 0); + + /* write the EOP addr */ + WREG32(mmCP_HQD_EOP_BASE_ADDR, eop_gpu_addr); + WREG32(mmCP_HQD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr)); + + /* set the VMID assigned */ + WREG32(mmCP_HQD_VMID, 0); + + /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ + tmp = RREG32(mmCP_HQD_EOP_CONTROL); + tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE, + (order_base_2(MEC_HPD_SIZE / 4) - 1)); + WREG32(mmCP_HQD_EOP_CONTROL, tmp); + } + vi_srbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + + /* init the queues. Just two for now. */ + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + struct amdgpu_ring *ring = &adev->gfx.compute_ring[i]; + + if (ring->mqd_obj == NULL) { + r = amdgpu_bo_create(adev, + sizeof(struct vi_mqd), + PAGE_SIZE, true, + AMDGPU_GEM_DOMAIN_GTT, 0, NULL, + &ring->mqd_obj); + if (r) { + dev_warn(adev->dev, "(%d) create MQD bo failed\n", r); + return r; + } + } + + r = amdgpu_bo_reserve(ring->mqd_obj, false); + if (unlikely(r != 0)) { + gfx_v8_0_cp_compute_fini(adev); + return r; + } + r = amdgpu_bo_pin(ring->mqd_obj, AMDGPU_GEM_DOMAIN_GTT, + &mqd_gpu_addr); + if (r) { + dev_warn(adev->dev, "(%d) pin MQD bo failed\n", r); + gfx_v8_0_cp_compute_fini(adev); + return r; + } + r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&buf); + if (r) { + dev_warn(adev->dev, "(%d) map MQD bo failed\n", r); + gfx_v8_0_cp_compute_fini(adev); + return r; + } + + /* init the mqd struct */ + memset(buf, 0, sizeof(struct vi_mqd)); + + mqd = (struct vi_mqd *)buf; + mqd->header = 0xC0310800; + mqd->compute_pipelinestat_enable = 0x00000001; + mqd->compute_static_thread_mgmt_se0 = 0xffffffff; + mqd->compute_static_thread_mgmt_se1 = 0xffffffff; + mqd->compute_static_thread_mgmt_se2 = 0xffffffff; + mqd->compute_static_thread_mgmt_se3 = 0xffffffff; + mqd->compute_misc_reserved = 0x00000003; + + mutex_lock(&adev->srbm_mutex); + vi_srbm_select(adev, ring->me, + ring->pipe, + ring->queue, 0); + + /* disable wptr polling */ + tmp = RREG32(mmCP_PQ_WPTR_POLL_CNTL); + tmp = REG_SET_FIELD(tmp, CP_PQ_WPTR_POLL_CNTL, EN, 0); + WREG32(mmCP_PQ_WPTR_POLL_CNTL, tmp); + + mqd->cp_hqd_eop_base_addr_lo = + RREG32(mmCP_HQD_EOP_BASE_ADDR); + mqd->cp_hqd_eop_base_addr_hi = + RREG32(mmCP_HQD_EOP_BASE_ADDR_HI); + + /* enable doorbell? 
*/
+ tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
+ if (use_doorbell) {
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
+ } else {
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 0);
+ }
+ WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, tmp);
+ mqd->cp_hqd_pq_doorbell_control = tmp;
+
+ /* disable the queue if it's active */
+ mqd->cp_hqd_dequeue_request = 0;
+ mqd->cp_hqd_pq_rptr = 0;
+ mqd->cp_hqd_pq_wptr = 0;
+ if (RREG32(mmCP_HQD_ACTIVE) & 1) {
+ WREG32(mmCP_HQD_DEQUEUE_REQUEST, 1);
+ for (j = 0; j < adev->usec_timeout; j++) {
+ if (!(RREG32(mmCP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ WREG32(mmCP_HQD_DEQUEUE_REQUEST, mqd->cp_hqd_dequeue_request);
+ WREG32(mmCP_HQD_PQ_RPTR, mqd->cp_hqd_pq_rptr);
+ WREG32(mmCP_HQD_PQ_WPTR, mqd->cp_hqd_pq_wptr);
+ }
+
+ /* set the pointer to the MQD */
+ mqd->cp_mqd_base_addr_lo = mqd_gpu_addr & 0xfffffffc;
+ mqd->cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr);
+ WREG32(mmCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr_lo);
+ WREG32(mmCP_MQD_BASE_ADDR_HI, mqd->cp_mqd_base_addr_hi);
+
+ /* set MQD vmid to 0 */
+ tmp = RREG32(mmCP_MQD_CONTROL);
+ tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
+ WREG32(mmCP_MQD_CONTROL, tmp);
+ mqd->cp_mqd_control = tmp;
+
+ /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
+ hqd_gpu_addr = ring->gpu_addr >> 8;
+ mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
+ mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
+ WREG32(mmCP_HQD_PQ_BASE, mqd->cp_hqd_pq_base_lo);
+ WREG32(mmCP_HQD_PQ_BASE_HI, mqd->cp_hqd_pq_base_hi);
+
+ /* set up the HQD, this is similar to CP_RB0_CNTL */
+ tmp = RREG32(mmCP_HQD_PQ_CONTROL);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
+ (order_base_2(ring->ring_size / 4) - 1));
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
+ ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
+#ifdef __BIG_ENDIAN
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
+#endif
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
+ WREG32(mmCP_HQD_PQ_CONTROL, tmp);
+ mqd->cp_hqd_pq_control = tmp;
+
+ /* set the wb address whether it's enabled or not */
+ wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
+ mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
+ mqd->cp_hqd_pq_rptr_report_addr_hi =
+ upper_32_bits(wb_gpu_addr) & 0xffff;
+ WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR,
+ mqd->cp_hqd_pq_rptr_report_addr_lo);
+ WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
+ mqd->cp_hqd_pq_rptr_report_addr_hi);
+
+ /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
+ wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+ mqd->cp_hqd_pq_wptr_poll_addr = wb_gpu_addr & 0xfffffffc;
+ mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
+ WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR, mqd->cp_hqd_pq_wptr_poll_addr);
+ WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
+ mqd->cp_hqd_pq_wptr_poll_addr_hi);
+
+ /* enable the doorbell if requested */
+ if (use_doorbell) {
+ if (adev->asic_type == CHIP_CARRIZO) {
+ WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER,
+ AMDGPU_DOORBELL_KIQ << 2);
+ WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER,
+ AMDGPU_DOORBELL_MEC_RING7 << 2);
+ }
+ tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
+ DOORBELL_OFFSET, ring->doorbell_index);
+ tmp = REG_SET_FIELD(tmp,
CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1); + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_SOURCE, 0); + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_HIT, 0); + mqd->cp_hqd_pq_doorbell_control = tmp; + + } else { + mqd->cp_hqd_pq_doorbell_control = 0; + } + WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, + mqd->cp_hqd_pq_doorbell_control); + + /* set the vmid for the queue */ + mqd->cp_hqd_vmid = 0; + WREG32(mmCP_HQD_VMID, mqd->cp_hqd_vmid); + + tmp = RREG32(mmCP_HQD_PERSISTENT_STATE); + tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53); + WREG32(mmCP_HQD_PERSISTENT_STATE, tmp); + mqd->cp_hqd_persistent_state = tmp; + + /* activate the queue */ + mqd->cp_hqd_active = 1; + WREG32(mmCP_HQD_ACTIVE, mqd->cp_hqd_active); + + vi_srbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + + amdgpu_bo_kunmap(ring->mqd_obj); + amdgpu_bo_unreserve(ring->mqd_obj); + } + + if (use_doorbell) { + tmp = RREG32(mmCP_PQ_STATUS); + tmp = REG_SET_FIELD(tmp, CP_PQ_STATUS, DOORBELL_ENABLE, 1); + WREG32(mmCP_PQ_STATUS, tmp); + } + + r = gfx_v8_0_cp_compute_start(adev); + if (r) + return r; + + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + struct amdgpu_ring *ring = &adev->gfx.compute_ring[i]; + + ring->ready = true; + r = amdgpu_ring_test_ring(ring); + if (r) + ring->ready = false; + } + + return 0; +} + +static int gfx_v8_0_cp_resume(struct amdgpu_device *adev) +{ + int r; + + if (adev->asic_type != CHIP_CARRIZO) + gfx_v8_0_enable_gui_idle_interrupt(adev, false); + + if (!adev->firmware.smu_load) { + /* legacy firmware loading */ + r = gfx_v8_0_cp_gfx_load_microcode(adev); + if (r) + return r; + + r = gfx_v8_0_cp_compute_load_microcode(adev); + if (r) + return r; + } else { + r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, + AMDGPU_UCODE_ID_CP_CE); + if (r) + return -EINVAL; + + r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, + AMDGPU_UCODE_ID_CP_PFP); + if (r) + return -EINVAL; + + r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, + AMDGPU_UCODE_ID_CP_ME); + if (r) + return -EINVAL; + + r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, + AMDGPU_UCODE_ID_CP_MEC1); + if (r) + return -EINVAL; + } + + r = gfx_v8_0_cp_gfx_resume(adev); + if (r) + return r; + + r = gfx_v8_0_cp_compute_resume(adev); + if (r) + return r; + + gfx_v8_0_enable_gui_idle_interrupt(adev, true); + + return 0; +} + +static void gfx_v8_0_cp_enable(struct amdgpu_device *adev, bool enable) +{ + gfx_v8_0_cp_gfx_enable(adev, enable); + gfx_v8_0_cp_compute_enable(adev, enable); +} + +static int gfx_v8_0_hw_init(struct amdgpu_device *adev) +{ + int r; + + gfx_v8_0_init_golden_registers(adev); + + gfx_v8_0_gpu_init(adev); + + r = gfx_v8_0_rlc_resume(adev); + if (r) + return r; + + r = gfx_v8_0_cp_resume(adev); + if (r) + return r; + + return r; +} + +static int gfx_v8_0_hw_fini(struct amdgpu_device *adev) +{ + gfx_v8_0_cp_enable(adev, false); + gfx_v8_0_rlc_stop(adev); + gfx_v8_0_cp_compute_fini(adev); + + return 0; +} + +static int gfx_v8_0_suspend(struct amdgpu_device *adev) +{ + return gfx_v8_0_hw_fini(adev); +} + +static int gfx_v8_0_resume(struct amdgpu_device *adev) +{ + return gfx_v8_0_hw_init(adev); +} + +static bool gfx_v8_0_is_idle(struct amdgpu_device *adev) +{ + if (REG_GET_FIELD(RREG32(mmGRBM_STATUS), GRBM_STATUS, GUI_ACTIVE)) + return false; + else + return true; +} + +static int gfx_v8_0_wait_for_idle(struct amdgpu_device *adev) +{ + unsigned i; + u32 tmp; + + for (i = 0; i < adev->usec_timeout; i++) { + /* read MC_STATUS */ + tmp = 
RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK; + + if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE)) + return 0; + udelay(1); + } + return -ETIMEDOUT; +} + +static void gfx_v8_0_print_status(struct amdgpu_device *adev) +{ + int i; + + dev_info(adev->dev, "GFX 8.x registers\n"); + dev_info(adev->dev, " GRBM_STATUS=0x%08X\n", + RREG32(mmGRBM_STATUS)); + dev_info(adev->dev, " GRBM_STATUS2=0x%08X\n", + RREG32(mmGRBM_STATUS2)); + dev_info(adev->dev, " GRBM_STATUS_SE0=0x%08X\n", + RREG32(mmGRBM_STATUS_SE0)); + dev_info(adev->dev, " GRBM_STATUS_SE1=0x%08X\n", + RREG32(mmGRBM_STATUS_SE1)); + dev_info(adev->dev, " GRBM_STATUS_SE2=0x%08X\n", + RREG32(mmGRBM_STATUS_SE2)); + dev_info(adev->dev, " GRBM_STATUS_SE3=0x%08X\n", + RREG32(mmGRBM_STATUS_SE3)); + dev_info(adev->dev, " CP_STAT = 0x%08x\n", RREG32(mmCP_STAT)); + dev_info(adev->dev, " CP_STALLED_STAT1 = 0x%08x\n", + RREG32(mmCP_STALLED_STAT1)); + dev_info(adev->dev, " CP_STALLED_STAT2 = 0x%08x\n", + RREG32(mmCP_STALLED_STAT2)); + dev_info(adev->dev, " CP_STALLED_STAT3 = 0x%08x\n", + RREG32(mmCP_STALLED_STAT3)); + dev_info(adev->dev, " CP_CPF_BUSY_STAT = 0x%08x\n", + RREG32(mmCP_CPF_BUSY_STAT)); + dev_info(adev->dev, " CP_CPF_STALLED_STAT1 = 0x%08x\n", + RREG32(mmCP_CPF_STALLED_STAT1)); + dev_info(adev->dev, " CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS)); + dev_info(adev->dev, " CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPC_BUSY_STAT)); + dev_info(adev->dev, " CP_CPC_STALLED_STAT1 = 0x%08x\n", + RREG32(mmCP_CPC_STALLED_STAT1)); + dev_info(adev->dev, " CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS)); + + for (i = 0; i < 32; i++) { + dev_info(adev->dev, " GB_TILE_MODE%d=0x%08X\n", + i, RREG32(mmGB_TILE_MODE0 + (i * 4))); + } + for (i = 0; i < 16; i++) { + dev_info(adev->dev, " GB_MACROTILE_MODE%d=0x%08X\n", + i, RREG32(mmGB_MACROTILE_MODE0 + (i * 4))); + } + for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { + dev_info(adev->dev, " se: %d\n", i); + gfx_v8_0_select_se_sh(adev, i, 0xffffffff); + dev_info(adev->dev, " PA_SC_RASTER_CONFIG=0x%08X\n", + RREG32(mmPA_SC_RASTER_CONFIG)); + dev_info(adev->dev, " PA_SC_RASTER_CONFIG_1=0x%08X\n", + RREG32(mmPA_SC_RASTER_CONFIG_1)); + } + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + + dev_info(adev->dev, " GB_ADDR_CONFIG=0x%08X\n", + RREG32(mmGB_ADDR_CONFIG)); + dev_info(adev->dev, " HDP_ADDR_CONFIG=0x%08X\n", + RREG32(mmHDP_ADDR_CONFIG)); + dev_info(adev->dev, " DMIF_ADDR_CALC=0x%08X\n", + RREG32(mmDMIF_ADDR_CALC)); + dev_info(adev->dev, " SDMA0_TILING_CONFIG=0x%08X\n", + RREG32(mmSDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET)); + dev_info(adev->dev, " SDMA1_TILING_CONFIG=0x%08X\n", + RREG32(mmSDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET)); + dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n", + RREG32(mmUVD_UDEC_ADDR_CONFIG)); + dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n", + RREG32(mmUVD_UDEC_DB_ADDR_CONFIG)); + dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n", + RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG)); + + dev_info(adev->dev, " CP_MEQ_THRESHOLDS=0x%08X\n", + RREG32(mmCP_MEQ_THRESHOLDS)); + dev_info(adev->dev, " SX_DEBUG_1=0x%08X\n", + RREG32(mmSX_DEBUG_1)); + dev_info(adev->dev, " TA_CNTL_AUX=0x%08X\n", + RREG32(mmTA_CNTL_AUX)); + dev_info(adev->dev, " SPI_CONFIG_CNTL=0x%08X\n", + RREG32(mmSPI_CONFIG_CNTL)); + dev_info(adev->dev, " SQ_CONFIG=0x%08X\n", + RREG32(mmSQ_CONFIG)); + dev_info(adev->dev, " DB_DEBUG=0x%08X\n", + RREG32(mmDB_DEBUG)); + dev_info(adev->dev, " DB_DEBUG2=0x%08X\n", + RREG32(mmDB_DEBUG2)); + dev_info(adev->dev, " DB_DEBUG3=0x%08X\n", + 
RREG32(mmDB_DEBUG3)); + dev_info(adev->dev, " CB_HW_CONTROL=0x%08X\n", + RREG32(mmCB_HW_CONTROL)); + dev_info(adev->dev, " SPI_CONFIG_CNTL_1=0x%08X\n", + RREG32(mmSPI_CONFIG_CNTL_1)); + dev_info(adev->dev, " PA_SC_FIFO_SIZE=0x%08X\n", + RREG32(mmPA_SC_FIFO_SIZE)); + dev_info(adev->dev, " VGT_NUM_INSTANCES=0x%08X\n", + RREG32(mmVGT_NUM_INSTANCES)); + dev_info(adev->dev, " CP_PERFMON_CNTL=0x%08X\n", + RREG32(mmCP_PERFMON_CNTL)); + dev_info(adev->dev, " PA_SC_FORCE_EOV_MAX_CNTS=0x%08X\n", + RREG32(mmPA_SC_FORCE_EOV_MAX_CNTS)); + dev_info(adev->dev, " VGT_CACHE_INVALIDATION=0x%08X\n", + RREG32(mmVGT_CACHE_INVALIDATION)); + dev_info(adev->dev, " VGT_GS_VERTEX_REUSE=0x%08X\n", + RREG32(mmVGT_GS_VERTEX_REUSE)); + dev_info(adev->dev, " PA_SC_LINE_STIPPLE_STATE=0x%08X\n", + RREG32(mmPA_SC_LINE_STIPPLE_STATE)); + dev_info(adev->dev, " PA_CL_ENHANCE=0x%08X\n", + RREG32(mmPA_CL_ENHANCE)); + dev_info(adev->dev, " PA_SC_ENHANCE=0x%08X\n", + RREG32(mmPA_SC_ENHANCE)); + + dev_info(adev->dev, " CP_ME_CNTL=0x%08X\n", + RREG32(mmCP_ME_CNTL)); + dev_info(adev->dev, " CP_MAX_CONTEXT=0x%08X\n", + RREG32(mmCP_MAX_CONTEXT)); + dev_info(adev->dev, " CP_ENDIAN_SWAP=0x%08X\n", + RREG32(mmCP_ENDIAN_SWAP)); + dev_info(adev->dev, " CP_DEVICE_ID=0x%08X\n", + RREG32(mmCP_DEVICE_ID)); + + dev_info(adev->dev, " CP_SEM_WAIT_TIMER=0x%08X\n", + RREG32(mmCP_SEM_WAIT_TIMER)); + + dev_info(adev->dev, " CP_RB_WPTR_DELAY=0x%08X\n", + RREG32(mmCP_RB_WPTR_DELAY)); + dev_info(adev->dev, " CP_RB_VMID=0x%08X\n", + RREG32(mmCP_RB_VMID)); + dev_info(adev->dev, " CP_RB0_CNTL=0x%08X\n", + RREG32(mmCP_RB0_CNTL)); + dev_info(adev->dev, " CP_RB0_WPTR=0x%08X\n", + RREG32(mmCP_RB0_WPTR)); + dev_info(adev->dev, " CP_RB0_RPTR_ADDR=0x%08X\n", + RREG32(mmCP_RB0_RPTR_ADDR)); + dev_info(adev->dev, " CP_RB0_RPTR_ADDR_HI=0x%08X\n", + RREG32(mmCP_RB0_RPTR_ADDR_HI)); + dev_info(adev->dev, " CP_RB0_CNTL=0x%08X\n", + RREG32(mmCP_RB0_CNTL)); + dev_info(adev->dev, " CP_RB0_BASE=0x%08X\n", + RREG32(mmCP_RB0_BASE)); + dev_info(adev->dev, " CP_RB0_BASE_HI=0x%08X\n", + RREG32(mmCP_RB0_BASE_HI)); + dev_info(adev->dev, " CP_MEC_CNTL=0x%08X\n", + RREG32(mmCP_MEC_CNTL)); + dev_info(adev->dev, " CP_CPF_DEBUG=0x%08X\n", + RREG32(mmCP_CPF_DEBUG)); + + dev_info(adev->dev, " SCRATCH_ADDR=0x%08X\n", + RREG32(mmSCRATCH_ADDR)); + dev_info(adev->dev, " SCRATCH_UMSK=0x%08X\n", + RREG32(mmSCRATCH_UMSK)); + + dev_info(adev->dev, " CP_INT_CNTL_RING0=0x%08X\n", + RREG32(mmCP_INT_CNTL_RING0)); + dev_info(adev->dev, " RLC_LB_CNTL=0x%08X\n", + RREG32(mmRLC_LB_CNTL)); + dev_info(adev->dev, " RLC_CNTL=0x%08X\n", + RREG32(mmRLC_CNTL)); + dev_info(adev->dev, " RLC_CGCG_CGLS_CTRL=0x%08X\n", + RREG32(mmRLC_CGCG_CGLS_CTRL)); + dev_info(adev->dev, " RLC_LB_CNTR_INIT=0x%08X\n", + RREG32(mmRLC_LB_CNTR_INIT)); + dev_info(adev->dev, " RLC_LB_CNTR_MAX=0x%08X\n", + RREG32(mmRLC_LB_CNTR_MAX)); + dev_info(adev->dev, " RLC_LB_INIT_CU_MASK=0x%08X\n", + RREG32(mmRLC_LB_INIT_CU_MASK)); + dev_info(adev->dev, " RLC_LB_PARAMS=0x%08X\n", + RREG32(mmRLC_LB_PARAMS)); + dev_info(adev->dev, " RLC_LB_CNTL=0x%08X\n", + RREG32(mmRLC_LB_CNTL)); + dev_info(adev->dev, " RLC_MC_CNTL=0x%08X\n", + RREG32(mmRLC_MC_CNTL)); + dev_info(adev->dev, " RLC_UCODE_CNTL=0x%08X\n", + RREG32(mmRLC_UCODE_CNTL)); + + mutex_lock(&adev->srbm_mutex); + for (i = 0; i < 16; i++) { + vi_srbm_select(adev, 0, 0, 0, i); + dev_info(adev->dev, " VM %d:\n", i); + dev_info(adev->dev, " SH_MEM_CONFIG=0x%08X\n", + RREG32(mmSH_MEM_CONFIG)); + dev_info(adev->dev, " SH_MEM_APE1_BASE=0x%08X\n", + RREG32(mmSH_MEM_APE1_BASE)); + dev_info(adev->dev, " 
SH_MEM_APE1_LIMIT=0x%08X\n", + RREG32(mmSH_MEM_APE1_LIMIT)); + dev_info(adev->dev, " SH_MEM_BASES=0x%08X\n", + RREG32(mmSH_MEM_BASES)); + } + vi_srbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); +} + +static int gfx_v8_0_soft_reset(struct amdgpu_device *adev) +{ + u32 grbm_soft_reset = 0, srbm_soft_reset = 0; + u32 tmp; + + /* GRBM_STATUS */ + tmp = RREG32(mmGRBM_STATUS); + if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK | + GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK | + GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK | + GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK | + GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK | + GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) { + grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, + GRBM_SOFT_RESET, SOFT_RESET_CP, 1); + grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, + GRBM_SOFT_RESET, SOFT_RESET_GFX, 1); + } + + if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) { + grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, + GRBM_SOFT_RESET, SOFT_RESET_CP, 1); + srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, + SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1); + } + + /* GRBM_STATUS2 */ + tmp = RREG32(mmGRBM_STATUS2); + if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY)) + grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, + GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); + + /* SRBM_STATUS */ + tmp = RREG32(mmSRBM_STATUS); + if (REG_GET_FIELD(tmp, SRBM_STATUS, GRBM_RQ_PENDING)) + srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, + SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1); + + if (grbm_soft_reset || srbm_soft_reset) { + gfx_v8_0_print_status(adev); + /* stop the rlc */ + gfx_v8_0_rlc_stop(adev); + + /* Disable GFX parsing/prefetching */ + gfx_v8_0_cp_gfx_enable(adev, false); + + /* Disable MEC parsing/prefetching */ + /* XXX todo */ + + if (grbm_soft_reset) { + tmp = RREG32(mmGRBM_SOFT_RESET); + tmp |= grbm_soft_reset; + dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32(mmGRBM_SOFT_RESET, tmp); + tmp = RREG32(mmGRBM_SOFT_RESET); + + udelay(50); + + tmp &= ~grbm_soft_reset; + WREG32(mmGRBM_SOFT_RESET, tmp); + tmp = RREG32(mmGRBM_SOFT_RESET); + } + + if (srbm_soft_reset) { + tmp = RREG32(mmSRBM_SOFT_RESET); + tmp |= srbm_soft_reset; + dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + udelay(50); + + tmp &= ~srbm_soft_reset; + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + } + /* Wait a little for things to settle down */ + udelay(50); + gfx_v8_0_print_status(adev); + } + return 0; +} + +/** + * gfx_v8_0_get_gpu_clock_counter - return GPU clock counter snapshot + * + * @adev: amdgpu_device pointer + * + * Fetches a GPU clock counter snapshot. + * Returns the 64 bit clock counter snapshot. 
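+ *
+ * The count is latched by writing RLC_CAPTURE_GPU_CLOCK_COUNT and then read
+ * back as two 32-bit halves; gpu_clock_mutex serializes the capture so the
+ * LSB/MSB reads of concurrent callers cannot interleave.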
+ */ +uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev) +{ + uint64_t clock; + + mutex_lock(&adev->gfx.gpu_clock_mutex); + WREG32(mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1); + clock = (uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_LSB) | + ((uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL); + mutex_unlock(&adev->gfx.gpu_clock_mutex); + return clock; +} + +static void gfx_v8_0_ring_emit_gds_switch(struct amdgpu_ring *ring, + uint32_t vmid, + uint32_t gds_base, uint32_t gds_size, + uint32_t gws_base, uint32_t gws_size, + uint32_t oa_base, uint32_t oa_size) +{ + gds_base = gds_base >> AMDGPU_GDS_SHIFT; + gds_size = gds_size >> AMDGPU_GDS_SHIFT; + + gws_base = gws_base >> AMDGPU_GWS_SHIFT; + gws_size = gws_size >> AMDGPU_GWS_SHIFT; + + oa_base = oa_base >> AMDGPU_OA_SHIFT; + oa_size = oa_size >> AMDGPU_OA_SHIFT; + + /* GDS Base */ + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); + amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | + WRITE_DATA_DST_SEL(0))); + amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_base); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, gds_base); + + /* GDS Size */ + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); + amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | + WRITE_DATA_DST_SEL(0))); + amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_size); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, gds_size); + + /* GWS */ + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); + amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | + WRITE_DATA_DST_SEL(0))); + amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].gws); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base); + + /* OA */ + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); + amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | + WRITE_DATA_DST_SEL(0))); + amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].oa); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base)); +} + +static int gfx_v8_0_early_init(struct amdgpu_device *adev) +{ + + adev->gfx.num_gfx_rings = GFX8_NUM_GFX_RINGS; + adev->gfx.num_compute_rings = GFX8_NUM_COMPUTE_RINGS; + gfx_v8_0_set_ring_funcs(adev); + gfx_v8_0_set_irq_funcs(adev); + gfx_v8_0_set_gds_init(adev); + + return 0; +} + +static int gfx_v8_0_set_powergating_state(struct amdgpu_device *adev, + enum amdgpu_powergating_state state) +{ + return 0; +} + +static int gfx_v8_0_set_clockgating_state(struct amdgpu_device *adev, + enum amdgpu_clockgating_state state) +{ + return 0; +} + +static u32 gfx_v8_0_ring_get_rptr_gfx(struct amdgpu_ring *ring) +{ + u32 rptr; + + rptr = ring->adev->wb.wb[ring->rptr_offs]; + + return rptr; +} + +static u32 gfx_v8_0_ring_get_wptr_gfx(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + u32 wptr; + + if (ring->use_doorbell) + /* XXX check if swapping is necessary on BE */ + wptr = ring->adev->wb.wb[ring->wptr_offs]; + else + wptr = RREG32(mmCP_RB0_WPTR); + + return wptr; +} + +static void gfx_v8_0_ring_set_wptr_gfx(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring->use_doorbell) { + /* XXX check if swapping is necessary on BE */ + adev->wb.wb[ring->wptr_offs] = ring->wptr; + WDOORBELL32(ring->doorbell_index, ring->wptr); + } else { + WREG32(mmCP_RB0_WPTR, ring->wptr); + (void)RREG32(mmCP_RB0_WPTR); + } +} + +static void gfx_v8_0_hdp_flush_cp_ring_emit(struct amdgpu_ring *ring) +{ + u32 ref_and_mask, reg_mem_engine; + + if (ring->type == 
AMDGPU_RING_TYPE_COMPUTE) { + switch (ring->me) { + case 1: + ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe; + break; + case 2: + ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe; + break; + default: + return; + } + reg_mem_engine = 0; + } else { + ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK; + reg_mem_engine = WAIT_REG_MEM_ENGINE(1); /* pfp */ + } + + amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); + amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */ + WAIT_REG_MEM_FUNCTION(3) | /* == */ + reg_mem_engine)); + amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ); + amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE); + amdgpu_ring_write(ring, ref_and_mask); + amdgpu_ring_write(ring, ref_and_mask); + amdgpu_ring_write(ring, 0x20); /* poll interval */ +} + +static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring, + struct amdgpu_ib *ib) +{ + u32 header, control = 0; + u32 next_rptr = ring->wptr + 5; + if (ring->type == AMDGPU_RING_TYPE_COMPUTE) + control |= INDIRECT_BUFFER_VALID; + + if (ib->flush_hdp_writefifo) + next_rptr += 7; + + if (ring->need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) + next_rptr += 2; + + next_rptr += 4; + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); + amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM); + amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); + amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); + amdgpu_ring_write(ring, next_rptr); + + if (ib->flush_hdp_writefifo) + gfx_v8_0_hdp_flush_cp_ring_emit(ring); + + /* insert SWITCH_BUFFER packet before first IB in the ring frame */ + if (ring->need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) { + amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); + amdgpu_ring_write(ring, 0); + ring->need_ctx_switch = false; + } + + if (ib->is_const_ib) + header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2); + else + header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); + + control |= ib->length_dw | + (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0); + + amdgpu_ring_write(ring, header); + amdgpu_ring_write(ring, +#ifdef __BIG_ENDIAN + (2 << 0) | +#endif + (ib->gpu_addr & 0xFFFFFFFC)); + amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF); + amdgpu_ring_write(ring, control); +} + +static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr, + u64 seq, bool write64bit) +{ + /* EVENT_WRITE_EOP - flush caches, send int */ + amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); + amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN | + EOP_TC_ACTION_EN | + EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | + EVENT_INDEX(5))); + amdgpu_ring_write(ring, addr & 0xfffffffc); + amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) | + DATA_SEL(write64bit ? 2 : 1) | INT_SEL(2)); + amdgpu_ring_write(ring, lower_32_bits(seq)); + amdgpu_ring_write(ring, upper_32_bits(seq)); +} + +/** + * gfx_v8_0_ring_emit_semaphore - emit a semaphore on the CP ring + * + * @ring: amdgpu ring buffer object + * @semaphore: amdgpu semaphore object + * @emit_wait: Is this a sempahore wait? + * + * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP + * from running ahead of semaphore waits. + */ +static bool gfx_v8_0_ring_emit_semaphore(struct amdgpu_ring *ring, + struct amdgpu_semaphore *semaphore, + bool emit_wait) +{ + uint64_t addr = semaphore->gpu_addr; + unsigned sel = emit_wait ? 
PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL; + + if (ring->adev->asic_type == CHIP_TOPAZ || + ring->adev->asic_type == CHIP_TONGA) { + amdgpu_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1)); + amdgpu_ring_write(ring, lower_32_bits(addr)); + amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel); + } else { + amdgpu_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 2)); + amdgpu_ring_write(ring, lower_32_bits(addr)); + amdgpu_ring_write(ring, upper_32_bits(addr)); + amdgpu_ring_write(ring, sel); + } + + if (emit_wait && (ring->type == AMDGPU_RING_TYPE_GFX)) { + /* Prevent the PFP from running ahead of the semaphore wait */ + amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); + amdgpu_ring_write(ring, 0x0); + } + + return true; +} + +static void gfx_v8_0_ce_sync_me(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + u64 gpu_addr = adev->wb.gpu_addr + adev->gfx.ce_sync_offs * 4; + + /* instruct DE to set a magic number */ + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); + amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | + WRITE_DATA_DST_SEL(5))); + amdgpu_ring_write(ring, gpu_addr & 0xfffffffc); + amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff); + amdgpu_ring_write(ring, 1); + + /* let CE wait till condition satisfied */ + amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); + amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */ + WAIT_REG_MEM_MEM_SPACE(1) | /* memory */ + WAIT_REG_MEM_FUNCTION(3) | /* == */ + WAIT_REG_MEM_ENGINE(2))); /* ce */ + amdgpu_ring_write(ring, gpu_addr & 0xfffffffc); + amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff); + amdgpu_ring_write(ring, 1); + amdgpu_ring_write(ring, 0xffffffff); + amdgpu_ring_write(ring, 4); /* poll interval */ + + /* instruct CE to reset wb of ce_sync to zero */ + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); + amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) | + WRITE_DATA_DST_SEL(5) | + WR_CONFIRM)); + amdgpu_ring_write(ring, gpu_addr & 0xfffffffc); + amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff); + amdgpu_ring_write(ring, 0); +} + +static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring, + unsigned vm_id, uint64_t pd_addr) +{ + int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); + u32 srbm_gfx_cntl = 0; + + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); + amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | + WRITE_DATA_DST_SEL(0))); + if (vm_id < 8) { + amdgpu_ring_write(ring, + (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id)); + } else { + amdgpu_ring_write(ring, + (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8)); + } + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, pd_addr >> 12); + + /* update SH_MEM_* regs */ + srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vm_id); + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); + amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | + WRITE_DATA_DST_SEL(0))); + amdgpu_ring_write(ring, mmSRBM_GFX_CNTL); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, srbm_gfx_cntl); + + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 6)); + amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | + WRITE_DATA_DST_SEL(0))); + amdgpu_ring_write(ring, mmSH_MEM_BASES); + amdgpu_ring_write(ring, 0); + + amdgpu_ring_write(ring, 0); /* SH_MEM_BASES */ + amdgpu_ring_write(ring, 0); /* SH_MEM_CONFIG */ + amdgpu_ring_write(ring, 1); /* SH_MEM_APE1_BASE */ + amdgpu_ring_write(ring, 0); /* SH_MEM_APE1_LIMIT */ + + srbm_gfx_cntl = 
REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, 0); + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); + amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | + WRITE_DATA_DST_SEL(0))); + amdgpu_ring_write(ring, mmSRBM_GFX_CNTL); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, srbm_gfx_cntl); + + + /* bits 0-15 are the VM contexts0-15 */ + /* invalidate the cache */ + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); + amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | + WRITE_DATA_DST_SEL(0))); + amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, 1 << vm_id); + + /* wait for the invalidate to complete */ + amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); + amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */ + WAIT_REG_MEM_FUNCTION(0) | /* always */ + WAIT_REG_MEM_ENGINE(0))); /* me */ + amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, 0); /* ref */ + amdgpu_ring_write(ring, 0); /* mask */ + amdgpu_ring_write(ring, 0x20); /* poll interval */ + + /* compute doesn't have PFP */ + if (usepfp) { + /* sync PFP to ME, otherwise we might get invalid PFP reads */ + amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); + amdgpu_ring_write(ring, 0x0); + + /* synce CE with ME to prevent CE fetch CEIB before context switch done */ + gfx_v8_0_ce_sync_me(ring); + } +} + +static bool gfx_v8_0_ring_is_lockup(struct amdgpu_ring *ring) +{ + if (gfx_v8_0_is_idle(ring->adev)) { + amdgpu_ring_lockup_update(ring); + return false; + } + return amdgpu_ring_test_lockup(ring); +} + +static u32 gfx_v8_0_ring_get_rptr_compute(struct amdgpu_ring *ring) +{ + return ring->adev->wb.wb[ring->rptr_offs]; +} + +static u32 gfx_v8_0_ring_get_wptr_compute(struct amdgpu_ring *ring) +{ + return ring->adev->wb.wb[ring->wptr_offs]; +} + +static void gfx_v8_0_ring_set_wptr_compute(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + /* XXX check if swapping is necessary on BE */ + adev->wb.wb[ring->wptr_offs] = ring->wptr; + WDOORBELL32(ring->doorbell_index, ring->wptr); +} + +static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring, + u64 addr, u64 seq, + bool write64bits) +{ + /* RELEASE_MEM - flush caches, send int */ + amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5)); + amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN | + EOP_TC_ACTION_EN | + EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | + EVENT_INDEX(5))); + amdgpu_ring_write(ring, DATA_SEL(write64bits ? 
2 : 1) | INT_SEL(2));
+ amdgpu_ring_write(ring, addr & 0xfffffffc);
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+ amdgpu_ring_write(ring, lower_32_bits(seq));
+ amdgpu_ring_write(ring, upper_32_bits(seq));
+}
+
+static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
+ enum amdgpu_interrupt_state state)
+{
+ u32 cp_int_cntl;
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
+ TIME_STAMP_INT_ENABLE, 0);
+ WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
+ cp_int_cntl =
+ REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
+ TIME_STAMP_INT_ENABLE, 1);
+ WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
+ break;
+ default:
+ break;
+ }
+}
+
+static void gfx_v8_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
+ int me, int pipe,
+ enum amdgpu_interrupt_state state)
+{
+ u32 mec_int_cntl, mec_int_cntl_reg;
+
+ /*
+ * amdgpu controls only pipe 0 of MEC1. That's why this function only
+ * handles the setting of interrupts for this specific pipe. All other
+ * pipes' interrupts are set by amdkfd.
+ */
+
+ if (me == 1) {
+ switch (pipe) {
+ case 0:
+ mec_int_cntl_reg = mmCP_ME1_PIPE0_INT_CNTL;
+ break;
+ default:
+ DRM_DEBUG("invalid pipe %d\n", pipe);
+ return;
+ }
+ } else {
+ DRM_DEBUG("invalid me %d\n", me);
+ return;
+ }
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ mec_int_cntl = RREG32(mec_int_cntl_reg);
+ mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
+ TIME_STAMP_INT_ENABLE, 0);
+ WREG32(mec_int_cntl_reg, mec_int_cntl);
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ mec_int_cntl = RREG32(mec_int_cntl_reg);
+ mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
+ TIME_STAMP_INT_ENABLE, 1);
+ WREG32(mec_int_cntl_reg, mec_int_cntl);
+ break;
+ default:
+ break;
+ }
+}
+
+static int gfx_v8_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 cp_int_cntl;
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
+ PRIV_REG_INT_ENABLE, 0);
+ WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
+ PRIV_REG_INT_ENABLE, 1);
+ WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int gfx_v8_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 cp_int_cntl;
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
+ PRIV_INSTR_INT_ENABLE, 0);
+ WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
+ PRIV_INSTR_INT_ENABLE, 1);
+ WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int gfx_v8_0_set_eop_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ switch (type) {
+ case AMDGPU_CP_IRQ_GFX_EOP:
+
gfx_v8_0_set_gfx_eop_interrupt_state(adev, state); + break; + case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP: + gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 0, state); + break; + case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP: + gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 1, state); + break; + case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP: + gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 2, state); + break; + case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP: + gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 3, state); + break; + case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP: + gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 0, state); + break; + case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP: + gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 1, state); + break; + case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP: + gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 2, state); + break; + case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP: + gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 3, state); + break; + default: + break; + } + return 0; +} + +static int gfx_v8_0_eop_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + int i; + u8 me_id, pipe_id, queue_id; + struct amdgpu_ring *ring; + + DRM_DEBUG("IH: CP EOP\n"); + me_id = (entry->ring_id & 0x0c) >> 2; + pipe_id = (entry->ring_id & 0x03) >> 0; + queue_id = (entry->ring_id & 0x70) >> 4; + + switch (me_id) { + case 0: + amdgpu_fence_process(&adev->gfx.gfx_ring[0]); + break; + case 1: + case 2: + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + ring = &adev->gfx.compute_ring[i]; + /* Per-queue interrupt is supported for MEC starting from VI. + * The interrupt can only be enabled/disabled per pipe instead of per queue. + */ + if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id)) + amdgpu_fence_process(ring); + } + break; + } + return 0; +} + +static int gfx_v8_0_priv_reg_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + DRM_ERROR("Illegal register access in command stream\n"); + schedule_work(&adev->reset_work); + return 0; +} + +static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + DRM_ERROR("Illegal instruction in command stream\n"); + schedule_work(&adev->reset_work); + return 0; +} + +const struct amdgpu_ip_funcs gfx_v8_0_ip_funcs = { + .early_init = gfx_v8_0_early_init, + .late_init = NULL, + .sw_init = gfx_v8_0_sw_init, + .sw_fini = gfx_v8_0_sw_fini, + .hw_init = gfx_v8_0_hw_init, + .hw_fini = gfx_v8_0_hw_fini, + .suspend = gfx_v8_0_suspend, + .resume = gfx_v8_0_resume, + .is_idle = gfx_v8_0_is_idle, + .wait_for_idle = gfx_v8_0_wait_for_idle, + .soft_reset = gfx_v8_0_soft_reset, + .print_status = gfx_v8_0_print_status, + .set_clockgating_state = gfx_v8_0_set_clockgating_state, + .set_powergating_state = gfx_v8_0_set_powergating_state, +}; + +static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = { + .get_rptr = gfx_v8_0_ring_get_rptr_gfx, + .get_wptr = gfx_v8_0_ring_get_wptr_gfx, + .set_wptr = gfx_v8_0_ring_set_wptr_gfx, + .parse_cs = NULL, + .emit_ib = gfx_v8_0_ring_emit_ib, + .emit_fence = gfx_v8_0_ring_emit_fence_gfx, + .emit_semaphore = gfx_v8_0_ring_emit_semaphore, + .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush, + .emit_gds_switch = gfx_v8_0_ring_emit_gds_switch, + .test_ring = gfx_v8_0_ring_test_ring, + .test_ib = gfx_v8_0_ring_test_ib, + .is_lockup = gfx_v8_0_ring_is_lockup, +}; + +static const struct amdgpu_ring_funcs 
gfx_v8_0_ring_funcs_compute = { + .get_rptr = gfx_v8_0_ring_get_rptr_compute, + .get_wptr = gfx_v8_0_ring_get_wptr_compute, + .set_wptr = gfx_v8_0_ring_set_wptr_compute, + .parse_cs = NULL, + .emit_ib = gfx_v8_0_ring_emit_ib, + .emit_fence = gfx_v8_0_ring_emit_fence_compute, + .emit_semaphore = gfx_v8_0_ring_emit_semaphore, + .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush, + .emit_gds_switch = gfx_v8_0_ring_emit_gds_switch, + .test_ring = gfx_v8_0_ring_test_ring, + .test_ib = gfx_v8_0_ring_test_ib, + .is_lockup = gfx_v8_0_ring_is_lockup, +}; + +static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < adev->gfx.num_gfx_rings; i++) + adev->gfx.gfx_ring[i].funcs = &gfx_v8_0_ring_funcs_gfx; + + for (i = 0; i < adev->gfx.num_compute_rings; i++) + adev->gfx.compute_ring[i].funcs = &gfx_v8_0_ring_funcs_compute; +} + +static const struct amdgpu_irq_src_funcs gfx_v8_0_eop_irq_funcs = { + .set = gfx_v8_0_set_eop_interrupt_state, + .process = gfx_v8_0_eop_irq, +}; + +static const struct amdgpu_irq_src_funcs gfx_v8_0_priv_reg_irq_funcs = { + .set = gfx_v8_0_set_priv_reg_fault_state, + .process = gfx_v8_0_priv_reg_irq, +}; + +static const struct amdgpu_irq_src_funcs gfx_v8_0_priv_inst_irq_funcs = { + .set = gfx_v8_0_set_priv_inst_fault_state, + .process = gfx_v8_0_priv_inst_irq, +}; + +static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST; + adev->gfx.eop_irq.funcs = &gfx_v8_0_eop_irq_funcs; + + adev->gfx.priv_reg_irq.num_types = 1; + adev->gfx.priv_reg_irq.funcs = &gfx_v8_0_priv_reg_irq_funcs; + + adev->gfx.priv_inst_irq.num_types = 1; + adev->gfx.priv_inst_irq.funcs = &gfx_v8_0_priv_inst_irq_funcs; +} + +static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev) +{ + /* init asci gds info */ + adev->gds.mem.total_size = RREG32(mmGDS_VMID0_SIZE); + adev->gds.gws.total_size = 64; + adev->gds.oa.total_size = 16; + + if (adev->gds.mem.total_size == 64 * 1024) { + adev->gds.mem.gfx_partition_size = 4096; + adev->gds.mem.cs_partition_size = 4096; + + adev->gds.gws.gfx_partition_size = 4; + adev->gds.gws.cs_partition_size = 4; + + adev->gds.oa.gfx_partition_size = 4; + adev->gds.oa.cs_partition_size = 1; + } else { + adev->gds.mem.gfx_partition_size = 1024; + adev->gds.mem.cs_partition_size = 1024; + + adev->gds.gws.gfx_partition_size = 16; + adev->gds.gws.cs_partition_size = 16; + + adev->gds.oa.gfx_partition_size = 4; + adev->gds.oa.cs_partition_size = 4; + } +} + +static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev, + u32 se, u32 sh) +{ + u32 mask = 0, tmp, tmp1; + int i; + + gfx_v8_0_select_se_sh(adev, se, sh); + tmp = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG); + tmp1 = RREG32(mmGC_USER_SHADER_ARRAY_CONFIG); + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + + tmp &= 0xffff0000; + + tmp |= tmp1; + tmp >>= 16; + + for (i = 0; i < adev->gfx.config.max_cu_per_sh; i ++) { + mask <<= 1; + mask |= 1; + } + + return (~tmp) & mask; +} + +int gfx_v8_0_get_cu_info(struct amdgpu_device *adev, + struct amdgpu_cu_info *cu_info) +{ + int i, j, k, counter, active_cu_number = 0; + u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0; + + if (!adev || !cu_info) + return -EINVAL; + + mutex_lock(&adev->grbm_idx_mutex); + for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { + for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { + mask = 1; + ao_bitmap = 0; + counter = 0; + bitmap = gfx_v8_0_get_cu_active_bitmap(adev, i, j); + cu_info->bitmap[i][j] = bitmap; + + for (k = 0; k < 
adev->gfx.config.max_cu_per_sh; k ++) { + if (bitmap & mask) { + if (counter < 2) + ao_bitmap |= mask; + counter ++; + } + mask <<= 1; + } + active_cu_number += counter; + ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8)); + } + } + + cu_info->number = active_cu_number; + cu_info->ao_cu_mask = ao_cu_mask; + mutex_unlock(&adev->grbm_idx_mutex); + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h new file mode 100644 index 000000000000..be8a5f8e176e --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h @@ -0,0 +1,33 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __GFX_V8_0_H__ +#define __GFX_V8_0_H__ + +extern const struct amdgpu_ip_funcs gfx_v8_0_ip_funcs; + +uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev); +void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num); +int gfx_v8_0_get_cu_info(struct amdgpu_device *adev, struct amdgpu_cu_info *cu_info); + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c new file mode 100644 index 000000000000..ac8cff85cde3 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -0,0 +1,1271 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include +#include "drmP.h" +#include "amdgpu.h" +#include "gmc_v8_0.h" +#include "amdgpu_ucode.h" + +#include "gmc/gmc_8_1_d.h" +#include "gmc/gmc_8_1_sh_mask.h" + +#include "bif/bif_5_0_d.h" +#include "bif/bif_5_0_sh_mask.h" + +#include "oss/oss_3_0_d.h" +#include "oss/oss_3_0_sh_mask.h" + +#include "vid.h" +#include "vi.h" + +static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev); +static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev); + +MODULE_FIRMWARE("radeon/topaz_mc.bin"); +MODULE_FIRMWARE("radeon/tonga_mc.bin"); + +static const u32 golden_settings_tonga_a11[] = +{ + mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000, + mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028, + mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991, + mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff, + mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff, + mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff, + mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff, +}; + +static const u32 tonga_mgcg_cgcg_init[] = +{ + mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 +}; + +static const u32 golden_settings_iceland_a11[] = +{ + mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff, + mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff, + mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff, + mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff +}; + +static const u32 iceland_mgcg_cgcg_init[] = +{ + mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 +}; + +static const u32 cz_mgcg_cgcg_init[] = +{ + mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 +}; + +static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev) +{ + switch (adev->asic_type) { + case CHIP_TOPAZ: + amdgpu_program_register_sequence(adev, + iceland_mgcg_cgcg_init, + (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init)); + amdgpu_program_register_sequence(adev, + golden_settings_iceland_a11, + (const u32)ARRAY_SIZE(golden_settings_iceland_a11)); + break; + case CHIP_TONGA: + amdgpu_program_register_sequence(adev, + tonga_mgcg_cgcg_init, + (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init)); + amdgpu_program_register_sequence(adev, + golden_settings_tonga_a11, + (const u32)ARRAY_SIZE(golden_settings_tonga_a11)); + break; + case CHIP_CARRIZO: + amdgpu_program_register_sequence(adev, + cz_mgcg_cgcg_init, + (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init)); + break; + default: + break; + } +} + +/** + * gmc8_mc_wait_for_idle - wait for MC idle callback. + * + * @adev: amdgpu_device pointer + * + * Wait for the MC (memory controller) to be idle. + * (evergreen+). + * Returns 0 if the MC is idle, -1 if not. 
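+ *
+ * The wait simply polls the VMC and MCB/MCC/MCD busy bits in SRBM_STATUS
+ * once per microsecond, for at most adev->usec_timeout iterations.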
+ */ +int gmc_v8_0_mc_wait_for_idle(struct amdgpu_device *adev) +{ + unsigned i; + u32 tmp; + + for (i = 0; i < adev->usec_timeout; i++) { + /* read MC_STATUS */ + tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__VMC_BUSY_MASK | + SRBM_STATUS__MCB_BUSY_MASK | + SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | + SRBM_STATUS__MCC_BUSY_MASK | + SRBM_STATUS__MCD_BUSY_MASK | + SRBM_STATUS__VMC1_BUSY_MASK); + if (!tmp) + return 0; + udelay(1); + } + return -1; +} + +void gmc_v8_0_mc_stop(struct amdgpu_device *adev, + struct amdgpu_mode_mc_save *save) +{ + u32 blackout; + + if (adev->mode_info.num_crtc) + amdgpu_display_stop_mc_access(adev, save); + + amdgpu_asic_wait_for_mc_idle(adev); + + blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL); + if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) { + /* Block CPU access */ + WREG32(mmBIF_FB_EN, 0); + /* blackout the MC */ + blackout = REG_SET_FIELD(blackout, + MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 1); + WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout); + } + /* wait for the MC to settle */ + udelay(100); +} + +void gmc_v8_0_mc_resume(struct amdgpu_device *adev, + struct amdgpu_mode_mc_save *save) +{ + u32 tmp; + + /* unblackout the MC */ + tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL); + tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0); + WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp); + /* allow CPU access */ + tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1); + tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1); + WREG32(mmBIF_FB_EN, tmp); + + if (adev->mode_info.num_crtc) + amdgpu_display_resume_mc_access(adev, save); +} + +/** + * gmc_v8_0_init_microcode - load ucode images from disk + * + * @adev: amdgpu_device pointer + * + * Use the firmware interface to load the ucode images into + * the driver (not loaded into hw). + * Returns 0 on success, error on failure. + */ +static int gmc_v8_0_init_microcode(struct amdgpu_device *adev) +{ + const char *chip_name; + char fw_name[30]; + int err; + + DRM_DEBUG("\n"); + + switch (adev->asic_type) { + case CHIP_TOPAZ: + chip_name = "topaz"; + break; + case CHIP_TONGA: + chip_name = "tonga"; + break; + case CHIP_CARRIZO: + return 0; + default: BUG(); + } + + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); + err = request_firmware(&adev->mc.fw, fw_name, adev->dev); + if (err) + goto out; + err = amdgpu_ucode_validate(adev->mc.fw); + +out: + if (err) { + printk(KERN_ERR + "mc: Failed to load firmware \"%s\"\n", + fw_name); + release_firmware(adev->mc.fw); + adev->mc.fw = NULL; + } + return err; +} + +/** + * gmc_v8_0_mc_load_microcode - load MC ucode into the hw + * + * @adev: amdgpu_device pointer + * + * Load the GDDR MC ucode into the hw (CIK). + * Returns 0 on success, error on failure. 
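+ *
+ * The load only happens when the MC SEQ engine is not already running: the
+ * engine is reset and set writable, the io_debug index/data pairs are
+ * written, the ucode words are streamed into MC_SEQ_SUP_PGM, and the engine
+ * is restarted and polled until the D0/D1 training-done flags are set.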
+ */ +static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev) +{ + const struct mc_firmware_header_v1_0 *hdr; + const __le32 *fw_data = NULL; + const __le32 *io_mc_regs = NULL; + u32 running, blackout = 0; + int i, ucode_size, regs_size; + + if (!adev->mc.fw) + return -EINVAL; + + hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data; + amdgpu_ucode_print_mc_hdr(&hdr->header); + + adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version); + regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2); + io_mc_regs = (const __le32 *) + (adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes)); + ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; + fw_data = (const __le32 *) + (adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + + running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN); + + if (running == 0) { + if (running) { + blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL); + WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1); + } + + /* reset the engine and set to writable */ + WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008); + WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010); + + /* load mc io regs */ + for (i = 0; i < regs_size; i++) { + WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++)); + WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++)); + } + /* load the MC ucode */ + for (i = 0; i < ucode_size; i++) + WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++)); + + /* put the engine back into the active state */ + WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008); + WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004); + WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001); + + /* wait for training to complete */ + for (i = 0; i < adev->usec_timeout; i++) { + if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL), + MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0)) + break; + udelay(1); + } + for (i = 0; i < adev->usec_timeout; i++) { + if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL), + MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1)) + break; + udelay(1); + } + + if (running) + WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout); + } + + return 0; +} + +static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev, + struct amdgpu_mc *mc) +{ + if (mc->mc_vram_size > 0xFFC0000000ULL) { + /* leave room for at least 1024M GTT */ + dev_warn(adev->dev, "limiting VRAM\n"); + mc->real_vram_size = 0xFFC0000000ULL; + mc->mc_vram_size = 0xFFC0000000ULL; + } + amdgpu_vram_location(adev, &adev->mc, 0); + adev->mc.gtt_base_align = 0; + amdgpu_gtt_location(adev, mc); +} + +/** + * gmc_v8_0_mc_program - program the GPU memory controller + * + * @adev: amdgpu_device pointer + * + * Set the location of vram, gart, and AGP in the GPU's + * physical address space (CIK). 
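+ *
+ * MC client access is stopped (and the MC blacked out) while the system
+ * aperture, FB location and AGP registers are reprogrammed, then resumed
+ * once the new layout is in place.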
+ */
+static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
+{
+ struct amdgpu_mode_mc_save save;
+ u32 tmp;
+ int i, j;
+
+ /* Initialize HDP */
+ for (i = 0, j = 0; i < 32; i++, j += 0x6) {
+ WREG32((0xb05 + j), 0x00000000);
+ WREG32((0xb06 + j), 0x00000000);
+ WREG32((0xb07 + j), 0x00000000);
+ WREG32((0xb08 + j), 0x00000000);
+ WREG32((0xb09 + j), 0x00000000);
+ }
+ WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
+
+ if (adev->mode_info.num_crtc)
+ amdgpu_display_set_vga_render_state(adev, false);
+
+ gmc_v8_0_mc_stop(adev, &save);
+ if (amdgpu_asic_wait_for_mc_idle(adev)) {
+ dev_warn(adev->dev, "Wait for MC idle timed out!\n");
+ }
+ /* Update configuration */
+ WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ adev->mc.vram_start >> 12);
+ WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ adev->mc.vram_end >> 12);
+ WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
+ adev->vram_scratch.gpu_addr >> 12);
+ tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
+ tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
+ WREG32(mmMC_VM_FB_LOCATION, tmp);
+ /* XXX double check these! */
+ WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
+ WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
+ WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
+ WREG32(mmMC_VM_AGP_BASE, 0);
+ WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
+ WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
+ if (amdgpu_asic_wait_for_mc_idle(adev)) {
+ dev_warn(adev->dev, "Wait for MC idle timed out!\n");
+ }
+ gmc_v8_0_mc_resume(adev, &save);
+
+ WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
+
+ tmp = RREG32(mmHDP_MISC_CNTL);
+ tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
+ WREG32(mmHDP_MISC_CNTL, tmp);
+
+ tmp = RREG32(mmHDP_HOST_PATH_CNTL);
+ WREG32(mmHDP_HOST_PATH_CNTL, tmp);
+}
+
+/**
+ * gmc_v8_0_mc_init - initialize the memory controller driver params
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Look up the amount of vram, vram width, and decide how to place
+ * vram and gart within the GPU's physical address space (CIK).
+ * Returns 0 for success.
+ */
+static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
+{
+ u32 tmp;
+ int chansize, numchan;
+
+ /* Get VRAM information */
+ tmp = RREG32(mmMC_ARB_RAMCFG);
+ if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
+ chansize = 64;
+ } else {
+ chansize = 32;
+ }
+ tmp = RREG32(mmMC_SHARED_CHMAP);
+ switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
+ case 0:
+ default:
+ numchan = 1;
+ break;
+ case 1:
+ numchan = 2;
+ break;
+ case 2:
+ numchan = 4;
+ break;
+ case 3:
+ numchan = 8;
+ break;
+ case 4:
+ numchan = 3;
+ break;
+ case 5:
+ numchan = 6;
+ break;
+ case 6:
+ numchan = 10;
+ break;
+ case 7:
+ numchan = 12;
+ break;
+ case 8:
+ numchan = 16;
+ break;
+ }
+ adev->mc.vram_width = numchan * chansize;
+ /* Could the aperture size report 0? */
+ adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
+ adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
+ /* CONFIG_MEMSIZE reports the VRAM size in MB */
+ adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+ adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+ adev->mc.visible_vram_size = adev->mc.aper_size;
+
+ /* unless the user has overridden it, set the gart
+ * size to 1024 MB or the vram size, whichever is larger.
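+ * For example, a 512 MB board still gets a 1 GB GTT, while a
+ * 4 GB board gets a 4 GB GTT.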
+ */ + if (amdgpu_gart_size == -1) + adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size); + else + adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20; + + gmc_v8_0_vram_gtt_location(adev, &adev->mc); + + return 0; +} + +/* + * GART + * VMID 0 is the physical GPU addresses as used by the kernel. + * VMIDs 1-15 are used for userspace clients and are handled + * by the amdgpu vm/hsa code. + */ + +/** + * gmc_v8_0_gart_flush_gpu_tlb - gart tlb flush callback + * + * @adev: amdgpu_device pointer + * @vmid: vm instance to flush + * + * Flush the TLB for the requested page table (CIK). + */ +static void gmc_v8_0_gart_flush_gpu_tlb(struct amdgpu_device *adev, + uint32_t vmid) +{ + /* flush hdp cache */ + WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0); + + /* bits 0-15 are the VM contexts0-15 */ + WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid); +} + +/** + * gmc_v8_0_gart_set_pte_pde - update the page tables using MMIO + * + * @adev: amdgpu_device pointer + * @cpu_pt_addr: cpu address of the page table + * @gpu_page_idx: entry in the page table to update + * @addr: dst addr to write into pte/pde + * @flags: access flags + * + * Update the page tables using the CPU. + */ +static int gmc_v8_0_gart_set_pte_pde(struct amdgpu_device *adev, + void *cpu_pt_addr, + uint32_t gpu_page_idx, + uint64_t addr, + uint32_t flags) +{ + void __iomem *ptr = (void *)cpu_pt_addr; + uint64_t value; + + /* + * PTE format on VI: + * 63:40 reserved + * 39:12 4k physical page base address + * 11:7 fragment + * 6 write + * 5 read + * 4 exe + * 3 reserved + * 2 snooped + * 1 system + * 0 valid + * + * PDE format on VI: + * 63:59 block fragment size + * 58:40 reserved + * 39:1 physical base address of PTE + * bits 5:1 must be 0. + * 0 valid + */ + value = addr & 0x000000FFFFFFF000ULL; + value |= flags; + writeq(value, ptr + (gpu_page_idx * 8)); + + return 0; +} + +/** + * gmc_v8_0_gart_enable - gart enable + * + * @adev: amdgpu_device pointer + * + * This sets up the TLBs, programs the page tables for VMID0, + * sets up the hw for VMIDs 1-15 which are allocated on + * demand, and sets up the global locations for the LDS, GDS, + * and GPUVM for FSA64 clients (CIK). + * Returns 0 for success, errors for failure. 
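Editor's aside, not part of the patch: given the VI PTE layout documented in gmc_v8_0_gart_set_pte_pde() above, an entry is the 4K-aligned physical address plus the low flag bits. A sketch; the VI_PTE_* names are hypothetical, the bit positions come from the comment above.

#include <stdint.h>

#define VI_PTE_VALID   (1ULL << 0)
#define VI_PTE_SYSTEM  (1ULL << 1)
#define VI_PTE_SNOOPED (1ULL << 2)
#define VI_PTE_EXE     (1ULL << 4)
#define VI_PTE_READ    (1ULL << 5)
#define VI_PTE_WRITE   (1ULL << 6)

static uint64_t vi_make_pte(uint64_t phys_addr, uint64_t flags)
{
	/* bits 39:12 carry the 4K page base; bits below 4K are dropped */
	return (phys_addr & 0x000000FFFFFFF000ULL) | flags;
}

/* e.g. a readable/writable system page:
 *   vi_make_pte(page_phys, VI_PTE_VALID | VI_PTE_SYSTEM | VI_PTE_SNOOPED |
 *                          VI_PTE_READ | VI_PTE_WRITE);
 */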
+ */ +static int gmc_v8_0_gart_enable(struct amdgpu_device *adev) +{ + int r, i; + u32 tmp; + + if (adev->gart.robj == NULL) { + dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); + return -EINVAL; + } + r = amdgpu_gart_table_vram_pin(adev); + if (r) + return r; + /* Setup TLB control */ + tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); + WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp); + /* Setup L2 cache */ + tmp = RREG32(mmVM_L2_CNTL); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1); + WREG32(mmVM_L2_CNTL, tmp); + tmp = RREG32(mmVM_L2_CNTL2); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); + WREG32(mmVM_L2_CNTL2, tmp); + tmp = RREG32(mmVM_L2_CNTL3); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 4); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4); + WREG32(mmVM_L2_CNTL3, tmp); + /* XXX: set to enable PTE/PDE in system memory */ + tmp = RREG32(mmVM_L2_CNTL4); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_PHYSICAL, 0); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SHARED, 0); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SNOOP, 0); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_PHYSICAL, 0); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SHARED, 0); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SNOOP, 0); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_PHYSICAL, 0); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SHARED, 0); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SNOOP, 0); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_PHYSICAL, 0); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SHARED, 0); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0); + WREG32(mmVM_L2_CNTL4, tmp); + /* setup context0 */ + WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12); + WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12); + WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12); + WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, + (u32)(adev->dummy_page.addr >> 12)); + WREG32(mmVM_CONTEXT0_CNTL2, 0); + tmp = RREG32(mmVM_CONTEXT0_CNTL); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + WREG32(mmVM_CONTEXT0_CNTL, tmp); + + WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR, 
0); + WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR, 0); + WREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET, 0); + + /* empty context1-15 */ + /* FIXME start with 4G, once using 2 level pt switch to full + * vm size space + */ + /* set vm size, must be a multiple of 4 */ + WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0); + WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn); + for (i = 1; i < 16; i++) { + if (i < 8) + WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i, + adev->gart.table_addr >> 12); + else + WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8, + adev->gart.table_addr >> 12); + } + + /* enable context1-15 */ + WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR, + (u32)(adev->dummy_page.addr >> 12)); + WREG32(mmVM_CONTEXT1_CNTL2, 4); + tmp = RREG32(mmVM_CONTEXT1_CNTL); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_INTERRUPT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_INTERRUPT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE, + amdgpu_vm_block_size - 9); + WREG32(mmVM_CONTEXT1_CNTL, tmp); + + gmc_v8_0_gart_flush_gpu_tlb(adev, 0); + DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", + (unsigned)(adev->mc.gtt_size >> 20), + (unsigned long long)adev->gart.table_addr); + adev->gart.ready = true; + return 0; +} + +static int gmc_v8_0_gart_init(struct amdgpu_device *adev) +{ + int r; + + if (adev->gart.robj) { + WARN(1, "R600 PCIE GART already initialized\n"); + return 0; + } + /* Initialize common gart structure */ + r = amdgpu_gart_init(adev); + if (r) + return r; + adev->gart.table_size = adev->gart.num_gpu_pages * 8; + return amdgpu_gart_table_vram_alloc(adev); +} + +/** + * gmc_v8_0_gart_disable - gart disable + * + * @adev: amdgpu_device pointer + * + * This disables all VM page table (CIK). 
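Editor's aside, not part of the patch: two small pieces of arithmetic from the GART setup above, expressed as stand-alone helpers. The names and the base0/base8 parameters are hypothetical; the register split mirrors the VMID 0-7 / 8-15 loop in gmc_v8_0_gart_enable() and the 8-byte entry size matches gmc_v8_0_gart_init().

#include <stdint.h>

/* Each GART entry is 8 bytes, matching table_size = num_gpu_pages * 8 above;
 * e.g. a 4 GB GTT of 4K pages is 1M entries, an 8 MB table. */
static uint64_t gart_table_bytes(uint64_t gtt_bytes, unsigned int page_shift)
{
	return (gtt_bytes >> page_shift) * 8;
}

/* VMIDs 0-7 use one bank of page-table base registers and VMIDs 8-15 a
 * second bank; base0/base8 stand in for mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR
 * and mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR. */
static uint32_t vmid_pt_base_reg(uint32_t base0, uint32_t base8, unsigned int vmid)
{
	return (vmid < 8) ? base0 + vmid : base8 + (vmid - 8);
}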
+ */ +static void gmc_v8_0_gart_disable(struct amdgpu_device *adev) +{ + u32 tmp; + + /* Disable all tables */ + WREG32(mmVM_CONTEXT0_CNTL, 0); + WREG32(mmVM_CONTEXT1_CNTL, 0); + /* Setup TLB control */ + tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0); + WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp); + /* Setup L2 cache */ + tmp = RREG32(mmVM_L2_CNTL); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0); + WREG32(mmVM_L2_CNTL, tmp); + WREG32(mmVM_L2_CNTL2, 0); + amdgpu_gart_table_vram_unpin(adev); +} + +/** + * gmc_v8_0_gart_fini - vm fini callback + * + * @adev: amdgpu_device pointer + * + * Tears down the driver GART/VM setup (CIK). + */ +static void gmc_v8_0_gart_fini(struct amdgpu_device *adev) +{ + amdgpu_gart_table_vram_free(adev); + amdgpu_gart_fini(adev); +} + +/* + * vm + * VMID 0 is the physical GPU addresses as used by the kernel. + * VMIDs 1-15 are used for userspace clients and are handled + * by the amdgpu vm/hsa code. + */ +/** + * gmc_v8_0_vm_init - cik vm init callback + * + * @adev: amdgpu_device pointer + * + * Inits cik specific vm parameters (number of VMs, base of vram for + * VMIDs 1-15) (CIK). + * Returns 0 for success. + */ +static int gmc_v8_0_vm_init(struct amdgpu_device *adev) +{ + /* + * number of VMs + * VMID 0 is reserved for System + * amdgpu graphics/compute will use VMIDs 1-7 + * amdkfd will use VMIDs 8-15 + */ + adev->vm_manager.nvm = AMDGPU_NUM_OF_VMIDS; + + /* base offset of vram pages */ + if (adev->flags & AMDGPU_IS_APU) { + u64 tmp = RREG32(mmMC_VM_FB_OFFSET); + tmp <<= 22; + adev->vm_manager.vram_base_offset = tmp; + } else + adev->vm_manager.vram_base_offset = 0; + + return 0; +} + +/** + * gmc_v8_0_vm_fini - cik vm fini callback + * + * @adev: amdgpu_device pointer + * + * Tear down any asic specific VM setup (CIK). + */ +static void gmc_v8_0_vm_fini(struct amdgpu_device *adev) +{ +} + +/** + * gmc_v8_0_vm_decode_fault - print human readable fault info + * + * @adev: amdgpu_device pointer + * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value + * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value + * + * Print human readable fault information (CIK). + */ +static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev, + u32 status, u32 addr, u32 mc_client) +{ + u32 mc_id; + u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID); + u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, + PROTECTIONS); + char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff, + (mc_client >> 8) & 0xff, mc_client & 0xff, 0 }; + + mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, + MEMORY_CLIENT_ID); + + printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n", + protections, vmid, addr, + REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, + MEMORY_CLIENT_RW) ? 
+ "write" : "read", block, mc_client, mc_id); +} + +static int gmc_v8_0_early_init(struct amdgpu_device *adev) +{ + gmc_v8_0_set_gart_funcs(adev); + gmc_v8_0_set_irq_funcs(adev); + + if (adev->flags & AMDGPU_IS_APU) { + adev->mc.is_gddr5 = false; + } else { + u32 tmp = RREG32(mmMC_SEQ_MISC0); + + if (((tmp & MC_SEQ_MISC0__GDDR5_MASK) >> + MC_SEQ_MISC0__GDDR5__SHIFT) == MC_SEQ_MISC0__GDDR5_VALUE) + adev->mc.is_gddr5 = true; + else + adev->mc.is_gddr5 = false; + } + + return 0; +} + +static int gmc_v8_0_sw_init(struct amdgpu_device *adev) +{ + int r; + int dma_bits; + + r = amdgpu_gem_init(adev); + if (r) + return r; + + r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault); + if (r) + return r; + + r = amdgpu_irq_add_id(adev, 147, &adev->mc.vm_fault); + if (r) + return r; + + /* Adjust VM size here. + * Currently set to 4GB ((1 << 20) 4k pages). + * Max GPUVM size for cayman and SI is 40 bits. + */ + adev->vm_manager.max_pfn = amdgpu_vm_size << 18; + + /* Set the internal MC address mask + * This is the max address of the GPU's + * internal address space. + */ + adev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */ + + /* set DMA mask + need_dma32 flags. + * PCIE - can handle 40-bits. + * IGP - can handle 40-bits + * PCI - dma32 for legacy pci gart, 40 bits on newer asics + */ + adev->need_dma32 = false; + dma_bits = adev->need_dma32 ? 32 : 40; + r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits)); + if (r) { + adev->need_dma32 = true; + dma_bits = 32; + printk(KERN_WARNING "amdgpu: No suitable DMA available.\n"); + } + r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits)); + if (r) { + pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32)); + printk(KERN_WARNING "amdgpu: No coherent DMA available.\n"); + } + + r = gmc_v8_0_init_microcode(adev); + if (r) { + DRM_ERROR("Failed to load mc firmware!\n"); + return r; + } + + r = gmc_v8_0_mc_init(adev); + if (r) + return r; + + /* Memory manager */ + r = amdgpu_bo_init(adev); + if (r) + return r; + + r = gmc_v8_0_gart_init(adev); + if (r) + return r; + + if (!adev->vm_manager.enabled) { + r = gmc_v8_0_vm_init(adev); + if (r) { + dev_err(adev->dev, "vm manager initialization failed (%d).\n", r); + return r; + } + adev->vm_manager.enabled = true; + } + + return r; +} + +static int gmc_v8_0_sw_fini(struct amdgpu_device *adev) +{ + int i; + + if (adev->vm_manager.enabled) { + for (i = 0; i < AMDGPU_NUM_VM; ++i) + amdgpu_fence_unref(&adev->vm_manager.active[i]); + gmc_v8_0_vm_fini(adev); + adev->vm_manager.enabled = false; + } + gmc_v8_0_gart_fini(adev); + amdgpu_gem_fini(adev); + amdgpu_bo_fini(adev); + + return 0; +} + +static int gmc_v8_0_hw_init(struct amdgpu_device *adev) +{ + int r; + + gmc_v8_0_init_golden_registers(adev); + + gmc_v8_0_mc_program(adev); + + if (!(adev->flags & AMDGPU_IS_APU)) { + r = gmc_v8_0_mc_load_microcode(adev); + if (r) { + DRM_ERROR("Failed to load MC firmware!\n"); + return r; + } + } + + r = gmc_v8_0_gart_enable(adev); + if (r) + return r; + + return r; +} + +static int gmc_v8_0_hw_fini(struct amdgpu_device *adev) +{ + gmc_v8_0_gart_disable(adev); + + return 0; +} + +static int gmc_v8_0_suspend(struct amdgpu_device *adev) +{ + int i; + + if (adev->vm_manager.enabled) { + for (i = 0; i < AMDGPU_NUM_VM; ++i) + amdgpu_fence_unref(&adev->vm_manager.active[i]); + gmc_v8_0_vm_fini(adev); + adev->vm_manager.enabled = false; + } + gmc_v8_0_hw_fini(adev); + + return 0; +} + +static int gmc_v8_0_resume(struct amdgpu_device *adev) +{ + int r; + + r = gmc_v8_0_hw_init(adev); + if (r) + return r; + + if 
(!adev->vm_manager.enabled) { + r = gmc_v8_0_vm_init(adev); + if (r) { + dev_err(adev->dev, "vm manager initialization failed (%d).\n", r); + return r; + } + adev->vm_manager.enabled = true; + } + + return r; +} + +static bool gmc_v8_0_is_idle(struct amdgpu_device *adev) +{ + u32 tmp = RREG32(mmSRBM_STATUS); + + if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | + SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK)) + return false; + + return true; +} + +static int gmc_v8_0_wait_for_idle(struct amdgpu_device *adev) +{ + unsigned i; + u32 tmp; + + for (i = 0; i < adev->usec_timeout; i++) { + /* read MC_STATUS */ + tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK | + SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | + SRBM_STATUS__MCC_BUSY_MASK | + SRBM_STATUS__MCD_BUSY_MASK | + SRBM_STATUS__VMC_BUSY_MASK | + SRBM_STATUS__VMC1_BUSY_MASK); + if (!tmp) + return 0; + udelay(1); + } + return -ETIMEDOUT; + +} + +static void gmc_v8_0_print_status(struct amdgpu_device *adev) +{ + int i, j; + + dev_info(adev->dev, "GMC 8.x registers\n"); + dev_info(adev->dev, " SRBM_STATUS=0x%08X\n", + RREG32(mmSRBM_STATUS)); + dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", + RREG32(mmSRBM_STATUS2)); + + dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", + RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR)); + dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", + RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS)); + dev_info(adev->dev, " MC_VM_MX_L1_TLB_CNTL=0x%08X\n", + RREG32(mmMC_VM_MX_L1_TLB_CNTL)); + dev_info(adev->dev, " VM_L2_CNTL=0x%08X\n", + RREG32(mmVM_L2_CNTL)); + dev_info(adev->dev, " VM_L2_CNTL2=0x%08X\n", + RREG32(mmVM_L2_CNTL2)); + dev_info(adev->dev, " VM_L2_CNTL3=0x%08X\n", + RREG32(mmVM_L2_CNTL3)); + dev_info(adev->dev, " VM_L2_CNTL4=0x%08X\n", + RREG32(mmVM_L2_CNTL4)); + dev_info(adev->dev, " VM_CONTEXT0_PAGE_TABLE_START_ADDR=0x%08X\n", + RREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR)); + dev_info(adev->dev, " VM_CONTEXT0_PAGE_TABLE_END_ADDR=0x%08X\n", + RREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR)); + dev_info(adev->dev, " VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n", + RREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR)); + dev_info(adev->dev, " VM_CONTEXT0_CNTL2=0x%08X\n", + RREG32(mmVM_CONTEXT0_CNTL2)); + dev_info(adev->dev, " VM_CONTEXT0_CNTL=0x%08X\n", + RREG32(mmVM_CONTEXT0_CNTL)); + dev_info(adev->dev, " VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR=0x%08X\n", + RREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR)); + dev_info(adev->dev, " VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR=0x%08X\n", + RREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR)); + dev_info(adev->dev, " mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET=0x%08X\n", + RREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET)); + dev_info(adev->dev, " VM_CONTEXT1_PAGE_TABLE_START_ADDR=0x%08X\n", + RREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR)); + dev_info(adev->dev, " VM_CONTEXT1_PAGE_TABLE_END_ADDR=0x%08X\n", + RREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR)); + dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n", + RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR)); + dev_info(adev->dev, " VM_CONTEXT1_CNTL2=0x%08X\n", + RREG32(mmVM_CONTEXT1_CNTL2)); + dev_info(adev->dev, " VM_CONTEXT1_CNTL=0x%08X\n", + RREG32(mmVM_CONTEXT1_CNTL)); + for (i = 0; i < 16; i++) { + if (i < 8) + dev_info(adev->dev, " VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n", + i, RREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i)); + else + dev_info(adev->dev, " 
VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n", + i, RREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8)); + } + dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_LOW_ADDR=0x%08X\n", + RREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR)); + dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_HIGH_ADDR=0x%08X\n", + RREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR)); + dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR=0x%08X\n", + RREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR)); + dev_info(adev->dev, " MC_VM_FB_LOCATION=0x%08X\n", + RREG32(mmMC_VM_FB_LOCATION)); + dev_info(adev->dev, " MC_VM_AGP_BASE=0x%08X\n", + RREG32(mmMC_VM_AGP_BASE)); + dev_info(adev->dev, " MC_VM_AGP_TOP=0x%08X\n", + RREG32(mmMC_VM_AGP_TOP)); + dev_info(adev->dev, " MC_VM_AGP_BOT=0x%08X\n", + RREG32(mmMC_VM_AGP_BOT)); + + dev_info(adev->dev, " HDP_REG_COHERENCY_FLUSH_CNTL=0x%08X\n", + RREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL)); + dev_info(adev->dev, " HDP_NONSURFACE_BASE=0x%08X\n", + RREG32(mmHDP_NONSURFACE_BASE)); + dev_info(adev->dev, " HDP_NONSURFACE_INFO=0x%08X\n", + RREG32(mmHDP_NONSURFACE_INFO)); + dev_info(adev->dev, " HDP_NONSURFACE_SIZE=0x%08X\n", + RREG32(mmHDP_NONSURFACE_SIZE)); + dev_info(adev->dev, " HDP_MISC_CNTL=0x%08X\n", + RREG32(mmHDP_MISC_CNTL)); + dev_info(adev->dev, " HDP_HOST_PATH_CNTL=0x%08X\n", + RREG32(mmHDP_HOST_PATH_CNTL)); + + for (i = 0, j = 0; i < 32; i++, j += 0x6) { + dev_info(adev->dev, " %d:\n", i); + dev_info(adev->dev, " 0x%04X=0x%08X\n", + 0xb05 + j, RREG32(0xb05 + j)); + dev_info(adev->dev, " 0x%04X=0x%08X\n", + 0xb06 + j, RREG32(0xb06 + j)); + dev_info(adev->dev, " 0x%04X=0x%08X\n", + 0xb07 + j, RREG32(0xb07 + j)); + dev_info(adev->dev, " 0x%04X=0x%08X\n", + 0xb08 + j, RREG32(0xb08 + j)); + dev_info(adev->dev, " 0x%04X=0x%08X\n", + 0xb09 + j, RREG32(0xb09 + j)); + } + + dev_info(adev->dev, " BIF_FB_EN=0x%08X\n", + RREG32(mmBIF_FB_EN)); +} + +static int gmc_v8_0_soft_reset(struct amdgpu_device *adev) +{ + struct amdgpu_mode_mc_save save; + u32 srbm_soft_reset = 0; + u32 tmp = RREG32(mmSRBM_STATUS); + + if (tmp & SRBM_STATUS__VMC_BUSY_MASK) + srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, + SRBM_SOFT_RESET, SOFT_RESET_VMC, 1); + + if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | + SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) { + if (!(adev->flags & AMDGPU_IS_APU)) + srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, + SRBM_SOFT_RESET, SOFT_RESET_MC, 1); + } + + if (srbm_soft_reset) { + gmc_v8_0_print_status(adev); + + gmc_v8_0_mc_stop(adev, &save); + if (gmc_v8_0_wait_for_idle(adev)) { + dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); + } + + + tmp = RREG32(mmSRBM_SOFT_RESET); + tmp |= srbm_soft_reset; + dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + udelay(50); + + tmp &= ~srbm_soft_reset; + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + /* Wait a little for things to settle down */ + udelay(50); + + gmc_v8_0_mc_resume(adev, &save); + udelay(50); + + gmc_v8_0_print_status(adev); + } + + return 0; +} + +static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned type, + enum amdgpu_interrupt_state state) +{ + u32 tmp; + u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + 
VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK); + + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + /* system context */ + tmp = RREG32(mmVM_CONTEXT0_CNTL); + tmp &= ~bits; + WREG32(mmVM_CONTEXT0_CNTL, tmp); + /* VMs */ + tmp = RREG32(mmVM_CONTEXT1_CNTL); + tmp &= ~bits; + WREG32(mmVM_CONTEXT1_CNTL, tmp); + break; + case AMDGPU_IRQ_STATE_ENABLE: + /* system context */ + tmp = RREG32(mmVM_CONTEXT0_CNTL); + tmp |= bits; + WREG32(mmVM_CONTEXT0_CNTL, tmp); + /* VMs */ + tmp = RREG32(mmVM_CONTEXT1_CNTL); + tmp |= bits; + WREG32(mmVM_CONTEXT1_CNTL, tmp); + break; + default: + break; + } + + return 0; +} + +static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + u32 addr, status, mc_client; + + addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR); + status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS); + mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); + dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n", + entry->src_id, entry->src_data); + dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", + addr); + dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", + status); + gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client); + /* reset addr and status */ + WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); + + return 0; +} + +static int gmc_v8_0_set_clockgating_state(struct amdgpu_device *adev, + enum amdgpu_clockgating_state state) +{ + /* XXX handled via the smc on VI */ + + return 0; +} + +static int gmc_v8_0_set_powergating_state(struct amdgpu_device *adev, + enum amdgpu_powergating_state state) +{ + return 0; +} + +const struct amdgpu_ip_funcs gmc_v8_0_ip_funcs = { + .early_init = gmc_v8_0_early_init, + .late_init = NULL, + .sw_init = gmc_v8_0_sw_init, + .sw_fini = gmc_v8_0_sw_fini, + .hw_init = gmc_v8_0_hw_init, + .hw_fini = gmc_v8_0_hw_fini, + .suspend = gmc_v8_0_suspend, + .resume = gmc_v8_0_resume, + .is_idle = gmc_v8_0_is_idle, + .wait_for_idle = gmc_v8_0_wait_for_idle, + .soft_reset = gmc_v8_0_soft_reset, + .print_status = gmc_v8_0_print_status, + .set_clockgating_state = gmc_v8_0_set_clockgating_state, + .set_powergating_state = gmc_v8_0_set_powergating_state, +}; + +static const struct amdgpu_gart_funcs gmc_v8_0_gart_funcs = { + .flush_gpu_tlb = gmc_v8_0_gart_flush_gpu_tlb, + .set_pte_pde = gmc_v8_0_gart_set_pte_pde, +}; + +static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = { + .set = gmc_v8_0_vm_fault_interrupt_state, + .process = gmc_v8_0_process_interrupt, +}; + +static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev) +{ + if (adev->gart.gart_funcs == NULL) + adev->gart.gart_funcs = &gmc_v8_0_gart_funcs; +} + +static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->mc.vm_fault.num_types = 1; + adev->mc.vm_fault.funcs = &gmc_v8_0_irq_funcs; +} diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h new file mode 100644 index 000000000000..2dd7f809d4e1 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h @@ -0,0 +1,36 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __GMC_V8_0_H__ +#define __GMC_V8_0_H__ + +extern const struct amdgpu_ip_funcs gmc_v8_0_ip_funcs; + +/* XXX these shouldn't be exported */ +void gmc_v8_0_mc_stop(struct amdgpu_device *adev, + struct amdgpu_mode_mc_save *save); +void gmc_v8_0_mc_resume(struct amdgpu_device *adev, + struct amdgpu_mode_mc_save *save); +int gmc_v8_0_mc_wait_for_idle(struct amdgpu_device *adev); + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c new file mode 100644 index 000000000000..8f5c54be70b0 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c @@ -0,0 +1,172 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include +#include "drmP.h" +#include "amdgpu.h" +#include "iceland_smumgr.h" + +MODULE_FIRMWARE("radeon/topaz_smc.bin"); + +static void iceland_dpm_set_funcs(struct amdgpu_device *adev); + +static int iceland_dpm_early_init(struct amdgpu_device *adev) +{ + iceland_dpm_set_funcs(adev); + + return 0; +} + +static int iceland_dpm_init_microcode(struct amdgpu_device *adev) +{ + char fw_name[30] = "radeon/topaz_smc.bin"; + int err; + + err = request_firmware(&adev->pm.fw, fw_name, adev->dev); + if (err) + goto out; + err = amdgpu_ucode_validate(adev->pm.fw); + +out: + if (err) { + DRM_ERROR("Failed to load firmware \"%s\"", fw_name); + release_firmware(adev->pm.fw); + adev->pm.fw = NULL; + } + return err; +} + +static int iceland_dpm_sw_init(struct amdgpu_device *adev) +{ + int ret; + + ret = iceland_dpm_init_microcode(adev); + if (ret) + return ret; + + return 0; +} + +static int iceland_dpm_sw_fini(struct amdgpu_device *adev) +{ + return 0; +} + +static int iceland_dpm_hw_init(struct amdgpu_device *adev) +{ + int ret; + + mutex_lock(&adev->pm.mutex); + + ret = iceland_smu_init(adev); + if (ret) { + DRM_ERROR("SMU initialization failed\n"); + goto fail; + } + + ret = iceland_smu_start(adev); + if (ret) { + DRM_ERROR("SMU start failed\n"); + goto fail; + } + + mutex_unlock(&adev->pm.mutex); + return 0; + +fail: + adev->firmware.smu_load = false; + mutex_unlock(&adev->pm.mutex); + return -EINVAL; +} + +static int iceland_dpm_hw_fini(struct amdgpu_device *adev) +{ + mutex_lock(&adev->pm.mutex); + iceland_smu_fini(adev); + mutex_unlock(&adev->pm.mutex); + return 0; +} + +static int iceland_dpm_suspend(struct amdgpu_device *adev) +{ + iceland_dpm_hw_fini(adev); + + return 0; +} + +static int iceland_dpm_resume(struct amdgpu_device *adev) +{ + iceland_dpm_hw_init(adev); + + return 0; +} + +static int iceland_dpm_set_clockgating_state(struct amdgpu_device *adev, + enum amdgpu_clockgating_state state) +{ + return 0; +} + +static int iceland_dpm_set_powergating_state(struct amdgpu_device *adev, + enum amdgpu_powergating_state state) +{ + return 0; +} + +const struct amdgpu_ip_funcs iceland_dpm_ip_funcs = { + .early_init = iceland_dpm_early_init, + .late_init = NULL, + .sw_init = iceland_dpm_sw_init, + .sw_fini = iceland_dpm_sw_fini, + .hw_init = iceland_dpm_hw_init, + .hw_fini = iceland_dpm_hw_fini, + .suspend = iceland_dpm_suspend, + .resume = iceland_dpm_resume, + .is_idle = NULL, + .wait_for_idle = NULL, + .soft_reset = NULL, + .print_status = NULL, + .set_clockgating_state = iceland_dpm_set_clockgating_state, + .set_powergating_state = iceland_dpm_set_powergating_state, +}; + +static const struct amdgpu_dpm_funcs iceland_dpm_funcs = { + .get_temperature = NULL, + .pre_set_power_state = NULL, + .set_power_state = NULL, + .post_set_power_state = NULL, + .display_configuration_changed = NULL, + .get_sclk = NULL, + .get_mclk = NULL, + .print_power_state = NULL, + .debugfs_print_current_performance_level = NULL, + .force_performance_level = NULL, + .vblank_too_short = NULL, + .powergate_uvd = NULL, +}; + +static void iceland_dpm_set_funcs(struct amdgpu_device *adev) +{ + if (NULL == adev->pm.funcs) + adev->pm.funcs = &iceland_dpm_funcs; +} diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c new file mode 100644 index 000000000000..2de8adfac471 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c @@ -0,0 +1,435 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "drmP.h" +#include "amdgpu.h" +#include "amdgpu_ih.h" +#include "vid.h" + +#include "oss/oss_2_4_d.h" +#include "oss/oss_2_4_sh_mask.h" + +#include "bif/bif_5_1_d.h" +#include "bif/bif_5_1_sh_mask.h" + +/* + * Interrupts + * Starting with r6xx, interrupts are handled via a ring buffer. + * Ring buffers are areas of GPU accessible memory that the GPU + * writes interrupt vectors into and the host reads vectors out of. + * There is a rptr (read pointer) that determines where the + * host is currently reading, and a wptr (write pointer) + * which determines where the GPU has written. When the + * pointers are equal, the ring is idle. When the GPU + * writes vectors to the ring buffer, it increments the + * wptr. When there is an interrupt, the host then starts + * fetching commands and processing them until the pointers are + * equal again at which point it updates the rptr. + */ + +static void iceland_ih_set_interrupt_funcs(struct amdgpu_device *adev); + +/** + * iceland_ih_enable_interrupts - Enable the interrupt ring buffer + * + * @adev: amdgpu_device pointer + * + * Enable the interrupt ring buffer (VI). + */ +static void iceland_ih_enable_interrupts(struct amdgpu_device *adev) +{ + u32 ih_cntl = RREG32(mmIH_CNTL); + u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL); + + ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL, ENABLE_INTR, 1); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1); + WREG32(mmIH_CNTL, ih_cntl); + WREG32(mmIH_RB_CNTL, ih_rb_cntl); + adev->irq.ih.enabled = true; +} + +/** + * iceland_ih_disable_interrupts - Disable the interrupt ring buffer + * + * @adev: amdgpu_device pointer + * + * Disable the interrupt ring buffer (VI). + */ +static void iceland_ih_disable_interrupts(struct amdgpu_device *adev) +{ + u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL); + u32 ih_cntl = RREG32(mmIH_CNTL); + + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0); + ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL, ENABLE_INTR, 0); + WREG32(mmIH_RB_CNTL, ih_rb_cntl); + WREG32(mmIH_CNTL, ih_cntl); + /* set rptr, wptr to 0 */ + WREG32(mmIH_RB_RPTR, 0); + WREG32(mmIH_RB_WPTR, 0); + adev->irq.ih.enabled = false; + adev->irq.ih.rptr = 0; +} + +/** + * iceland_ih_irq_init - init and enable the interrupt ring + * + * @adev: amdgpu_device pointer + * + * Allocate a ring buffer for the interrupt controller, + * enable the RLC, disable interrupts, enable the IH + * ring buffer and enable it (VI). 
+ * Called at device load and reume. + * Returns 0 for success, errors for failure. + */ +static int iceland_ih_irq_init(struct amdgpu_device *adev) +{ + int ret = 0; + int rb_bufsz; + u32 interrupt_cntl, ih_cntl, ih_rb_cntl; + u64 wptr_off; + + /* disable irqs */ + iceland_ih_disable_interrupts(adev); + + /* setup interrupt control */ + WREG32(mmINTERRUPT_CNTL2, adev->dummy_page.addr >> 8); + interrupt_cntl = RREG32(mmINTERRUPT_CNTL); + /* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi + * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN + */ + interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_DUMMY_RD_OVERRIDE, 0); + /* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */ + interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_REQ_NONSNOOP_EN, 0); + WREG32(mmINTERRUPT_CNTL, interrupt_cntl); + + /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/ + WREG32(mmIH_RB_BASE, adev->irq.ih.gpu_addr >> 8); + + rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4); + ih_rb_cntl = REG_SET_FIELD(0, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 1); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz); + + /* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register value is written to memory */ + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_WRITEBACK_ENABLE, 1); + + /* set the writeback address whether it's enabled or not */ + wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4); + WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off)); + WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF); + + WREG32(mmIH_RB_CNTL, ih_rb_cntl); + + /* set rptr, wptr to 0 */ + WREG32(mmIH_RB_RPTR, 0); + WREG32(mmIH_RB_WPTR, 0); + + /* Default settings for IH_CNTL (disabled at first) */ + ih_cntl = RREG32(mmIH_CNTL); + ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL, MC_VMID, 0); + + if (adev->irq.msi_enabled) + ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL, RPTR_REARM, 1); + WREG32(mmIH_CNTL, ih_cntl); + + pci_set_master(adev->pdev); + + /* enable interrupts */ + iceland_ih_enable_interrupts(adev); + + return ret; +} + +/** + * iceland_ih_irq_disable - disable interrupts + * + * @adev: amdgpu_device pointer + * + * Disable interrupts on the hw (VI). + */ +static void iceland_ih_irq_disable(struct amdgpu_device *adev) +{ + iceland_ih_disable_interrupts(adev); + + /* Wait and acknowledge irq */ + mdelay(1); +} + +/** + * iceland_ih_get_wptr - get the IH ring buffer wptr + * + * @adev: amdgpu_device pointer + * + * Get the IH ring buffer wptr from either the register + * or the writeback memory buffer (VI). Also check for + * ring buffer overflow and deal with it. + * Used by cz_irq_process(VI). + * Returns the value of the wptr. + */ +static u32 iceland_ih_get_wptr(struct amdgpu_device *adev) +{ + u32 wptr, tmp; + + wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]); + + if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) { + wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0); + /* When a ring buffer overflow happen start parsing interrupt + * from the last not overwritten vector (wptr + 16). Hopefully + * this should allow us to catchup. 
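Editor's aside, not part of the patch: the IH ring is consumed in 16-byte vectors between rptr and wptr, and on overflow the code above resumes at wptr + 16, the oldest vector not yet overwritten. A minimal consumer sketch under those assumptions; ih_consume() and its callback are hypothetical.

#include <stdint.h>

struct ih_ring {
	uint32_t rptr;     /* byte offset of the next vector the host will read */
	uint32_t ptr_mask; /* ring size in bytes minus 1 (power of two) */
};

static void ih_consume(struct ih_ring *ih, uint32_t wptr, int overflowed,
		       void (*handle)(uint32_t rptr))
{
	if (overflowed)
		/* resume at the oldest vector that was not overwritten */
		ih->rptr = (wptr + 16) & ih->ptr_mask;

	while (ih->rptr != (wptr & ih->ptr_mask)) {
		handle(ih->rptr);
		ih->rptr = (ih->rptr + 16) & ih->ptr_mask; /* 4 dwords per vector */
	}
}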
+ */ + dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", + wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask); + adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask; + tmp = RREG32(mmIH_RB_CNTL); + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); + WREG32(mmIH_RB_CNTL, tmp); + } + return (wptr & adev->irq.ih.ptr_mask); +} + +/** + * iceland_ih_decode_iv - decode an interrupt vector + * + * @adev: amdgpu_device pointer + * + * Decodes the interrupt vector at the current rptr + * position and also advance the position. + */ +static void iceland_ih_decode_iv(struct amdgpu_device *adev, + struct amdgpu_iv_entry *entry) +{ + /* wptr/rptr are in bytes! */ + u32 ring_index = adev->irq.ih.rptr >> 2; + uint32_t dw[4]; + + dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]); + dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]); + dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]); + dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]); + + entry->src_id = dw[0] & 0xff; + entry->src_data = dw[1] & 0xfffffff; + entry->ring_id = dw[2] & 0xff; + entry->vm_id = (dw[2] >> 8) & 0xff; + entry->pas_id = (dw[2] >> 16) & 0xffff; + + /* wptr/rptr are in bytes! */ + adev->irq.ih.rptr += 16; +} + +/** + * iceland_ih_set_rptr - set the IH ring buffer rptr + * + * @adev: amdgpu_device pointer + * + * Set the IH ring buffer rptr. + */ +static void iceland_ih_set_rptr(struct amdgpu_device *adev) +{ + WREG32(mmIH_RB_RPTR, adev->irq.ih.rptr); +} + +static int iceland_ih_early_init(struct amdgpu_device *adev) +{ + iceland_ih_set_interrupt_funcs(adev); + return 0; +} + +static int iceland_ih_sw_init(struct amdgpu_device *adev) +{ + int r; + + r = amdgpu_ih_ring_init(adev, 64 * 1024, false); + if (r) + return r; + + r = amdgpu_irq_init(adev); + + return r; +} + +static int iceland_ih_sw_fini(struct amdgpu_device *adev) +{ + amdgpu_irq_fini(adev); + amdgpu_ih_ring_fini(adev); + + return 0; +} + +static int iceland_ih_hw_init(struct amdgpu_device *adev) +{ + int r; + + r = iceland_ih_irq_init(adev); + if (r) + return r; + + return 0; +} + +static int iceland_ih_hw_fini(struct amdgpu_device *adev) +{ + iceland_ih_irq_disable(adev); + + return 0; +} + +static int iceland_ih_suspend(struct amdgpu_device *adev) +{ + return iceland_ih_hw_fini(adev); +} + +static int iceland_ih_resume(struct amdgpu_device *adev) +{ + return iceland_ih_hw_init(adev); +} + +static bool iceland_ih_is_idle(struct amdgpu_device *adev) +{ + u32 tmp = RREG32(mmSRBM_STATUS); + + if (REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY)) + return false; + + return true; +} + +static int iceland_ih_wait_for_idle(struct amdgpu_device *adev) +{ + unsigned i; + u32 tmp; + + for (i = 0; i < adev->usec_timeout; i++) { + /* read MC_STATUS */ + tmp = RREG32(mmSRBM_STATUS); + if (!REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY)) + return 0; + udelay(1); + } + return -ETIMEDOUT; +} + +static void iceland_ih_print_status(struct amdgpu_device *adev) +{ + dev_info(adev->dev, "ICELAND IH registers\n"); + dev_info(adev->dev, " SRBM_STATUS=0x%08X\n", + RREG32(mmSRBM_STATUS)); + dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", + RREG32(mmSRBM_STATUS2)); + dev_info(adev->dev, " INTERRUPT_CNTL=0x%08X\n", + RREG32(mmINTERRUPT_CNTL)); + dev_info(adev->dev, " INTERRUPT_CNTL2=0x%08X\n", + RREG32(mmINTERRUPT_CNTL2)); + dev_info(adev->dev, " IH_CNTL=0x%08X\n", + RREG32(mmIH_CNTL)); + dev_info(adev->dev, " IH_RB_CNTL=0x%08X\n", + RREG32(mmIH_RB_CNTL)); + dev_info(adev->dev, " IH_RB_BASE=0x%08X\n", + RREG32(mmIH_RB_BASE)); + 
dev_info(adev->dev, " IH_RB_WPTR_ADDR_LO=0x%08X\n", + RREG32(mmIH_RB_WPTR_ADDR_LO)); + dev_info(adev->dev, " IH_RB_WPTR_ADDR_HI=0x%08X\n", + RREG32(mmIH_RB_WPTR_ADDR_HI)); + dev_info(adev->dev, " IH_RB_RPTR=0x%08X\n", + RREG32(mmIH_RB_RPTR)); + dev_info(adev->dev, " IH_RB_WPTR=0x%08X\n", + RREG32(mmIH_RB_WPTR)); +} + +static int iceland_ih_soft_reset(struct amdgpu_device *adev) +{ + u32 srbm_soft_reset = 0; + u32 tmp = RREG32(mmSRBM_STATUS); + + if (tmp & SRBM_STATUS__IH_BUSY_MASK) + srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, + SOFT_RESET_IH, 1); + + if (srbm_soft_reset) { + iceland_ih_print_status(adev); + + tmp = RREG32(mmSRBM_SOFT_RESET); + tmp |= srbm_soft_reset; + dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + udelay(50); + + tmp &= ~srbm_soft_reset; + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + /* Wait a little for things to settle down */ + udelay(50); + + iceland_ih_print_status(adev); + } + + return 0; +} + +static int iceland_ih_set_clockgating_state(struct amdgpu_device *adev, + enum amdgpu_clockgating_state state) +{ + // TODO + return 0; +} + +static int iceland_ih_set_powergating_state(struct amdgpu_device *adev, + enum amdgpu_powergating_state state) +{ + // TODO + return 0; +} + +const struct amdgpu_ip_funcs iceland_ih_ip_funcs = { + .early_init = iceland_ih_early_init, + .late_init = NULL, + .sw_init = iceland_ih_sw_init, + .sw_fini = iceland_ih_sw_fini, + .hw_init = iceland_ih_hw_init, + .hw_fini = iceland_ih_hw_fini, + .suspend = iceland_ih_suspend, + .resume = iceland_ih_resume, + .is_idle = iceland_ih_is_idle, + .wait_for_idle = iceland_ih_wait_for_idle, + .soft_reset = iceland_ih_soft_reset, + .print_status = iceland_ih_print_status, + .set_clockgating_state = iceland_ih_set_clockgating_state, + .set_powergating_state = iceland_ih_set_powergating_state, +}; + +static const struct amdgpu_ih_funcs iceland_ih_funcs = { + .get_wptr = iceland_ih_get_wptr, + .decode_iv = iceland_ih_decode_iv, + .set_rptr = iceland_ih_set_rptr +}; + +static void iceland_ih_set_interrupt_funcs(struct amdgpu_device *adev) +{ + if (adev->irq.ih_funcs == NULL) + adev->irq.ih_funcs = &iceland_ih_funcs; +} + diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.h b/drivers/gpu/drm/amd/amdgpu/iceland_ih.h new file mode 100644 index 000000000000..d001895eb93b --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.h @@ -0,0 +1,29 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __ICELAND_IH_H__ +#define __ICELAND_IH_H__ + +extern const struct amdgpu_ip_funcs iceland_ih_ip_funcs; + +#endif /* __ICELAND_IH_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_sdma_pkt_open.h b/drivers/gpu/drm/amd/amdgpu/iceland_sdma_pkt_open.h new file mode 100644 index 000000000000..c723602c7b0c --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/iceland_sdma_pkt_open.h @@ -0,0 +1,2167 @@ +/* + * Copyright (C) 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __ICELAND_SDMA_PKT_OPEN_H_ +#define __ICELAND_SDMA_PKT_OPEN_H_ + +#define SDMA_OP_NOP 0 +#define SDMA_OP_COPY 1 +#define SDMA_OP_WRITE 2 +#define SDMA_OP_INDIRECT 4 +#define SDMA_OP_FENCE 5 +#define SDMA_OP_TRAP 6 +#define SDMA_OP_SEM 7 +#define SDMA_OP_POLL_REGMEM 8 +#define SDMA_OP_COND_EXE 9 +#define SDMA_OP_ATOMIC 10 +#define SDMA_OP_CONST_FILL 11 +#define SDMA_OP_GEN_PTEPDE 12 +#define SDMA_OP_TIMESTAMP 13 +#define SDMA_OP_SRBM_WRITE 14 +#define SDMA_OP_PRE_EXE 15 +#define SDMA_SUBOP_TIMESTAMP_SET 0 +#define SDMA_SUBOP_TIMESTAMP_GET 1 +#define SDMA_SUBOP_TIMESTAMP_GET_GLOBAL 2 +#define SDMA_SUBOP_COPY_LINEAR 0 +#define SDMA_SUBOP_COPY_LINEAR_SUB_WIND 4 +#define SDMA_SUBOP_COPY_TILED 1 +#define SDMA_SUBOP_COPY_TILED_SUB_WIND 5 +#define SDMA_SUBOP_COPY_T2T_SUB_WIND 6 +#define SDMA_SUBOP_COPY_SOA 3 +#define SDMA_SUBOP_WRITE_LINEAR 0 +#define SDMA_SUBOP_WRITE_TILED 1 + +/*define for op field*/ +#define SDMA_PKT_HEADER_op_offset 0 +#define SDMA_PKT_HEADER_op_mask 0x000000FF +#define SDMA_PKT_HEADER_op_shift 0 +#define SDMA_PKT_HEADER_OP(x) (((x) & SDMA_PKT_HEADER_op_mask) << SDMA_PKT_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_HEADER_sub_op_offset 0 +#define SDMA_PKT_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_HEADER_sub_op_shift 8 +#define SDMA_PKT_HEADER_SUB_OP(x) (((x) & SDMA_PKT_HEADER_sub_op_mask) << SDMA_PKT_HEADER_sub_op_shift) + +/* +** Definitions for SDMA_PKT_COPY_LINEAR packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_COPY_LINEAR_HEADER_op_offset 0 +#define SDMA_PKT_COPY_LINEAR_HEADER_op_mask 0x000000FF +#define SDMA_PKT_COPY_LINEAR_HEADER_op_shift 0 +#define SDMA_PKT_COPY_LINEAR_HEADER_OP(x) (((x) & SDMA_PKT_COPY_LINEAR_HEADER_op_mask) << SDMA_PKT_COPY_LINEAR_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_COPY_LINEAR_HEADER_sub_op_offset 0 +#define SDMA_PKT_COPY_LINEAR_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_COPY_LINEAR_HEADER_sub_op_shift 8 +#define SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_LINEAR_HEADER_sub_op_mask) << SDMA_PKT_COPY_LINEAR_HEADER_sub_op_shift) + +/*define for broadcast field*/ +#define SDMA_PKT_COPY_LINEAR_HEADER_broadcast_offset 0 +#define SDMA_PKT_COPY_LINEAR_HEADER_broadcast_mask 0x00000001 +#define SDMA_PKT_COPY_LINEAR_HEADER_broadcast_shift 27 +#define SDMA_PKT_COPY_LINEAR_HEADER_BROADCAST(x) (((x) & SDMA_PKT_COPY_LINEAR_HEADER_broadcast_mask) << SDMA_PKT_COPY_LINEAR_HEADER_broadcast_shift) + +/*define for COUNT word*/ +/*define for count field*/ +#define SDMA_PKT_COPY_LINEAR_COUNT_count_offset 1 +#define SDMA_PKT_COPY_LINEAR_COUNT_count_mask 0x003FFFFF +#define SDMA_PKT_COPY_LINEAR_COUNT_count_shift 0 +#define SDMA_PKT_COPY_LINEAR_COUNT_COUNT(x) (((x) & SDMA_PKT_COPY_LINEAR_COUNT_count_mask) << SDMA_PKT_COPY_LINEAR_COUNT_count_shift) + +/*define for PARAMETER word*/ +/*define for dst_sw field*/ +#define SDMA_PKT_COPY_LINEAR_PARAMETER_dst_sw_offset 2 +#define SDMA_PKT_COPY_LINEAR_PARAMETER_dst_sw_mask 0x00000003 +#define SDMA_PKT_COPY_LINEAR_PARAMETER_dst_sw_shift 16 +#define SDMA_PKT_COPY_LINEAR_PARAMETER_DST_SW(x) (((x) & SDMA_PKT_COPY_LINEAR_PARAMETER_dst_sw_mask) << SDMA_PKT_COPY_LINEAR_PARAMETER_dst_sw_shift) + +/*define for dst_ha field*/ +#define SDMA_PKT_COPY_LINEAR_PARAMETER_dst_ha_offset 2 +#define SDMA_PKT_COPY_LINEAR_PARAMETER_dst_ha_mask 0x00000001 +#define SDMA_PKT_COPY_LINEAR_PARAMETER_dst_ha_shift 22 +#define SDMA_PKT_COPY_LINEAR_PARAMETER_DST_HA(x) (((x) & SDMA_PKT_COPY_LINEAR_PARAMETER_dst_ha_mask) << 
SDMA_PKT_COPY_LINEAR_PARAMETER_dst_ha_shift) + +/*define for src_sw field*/ +#define SDMA_PKT_COPY_LINEAR_PARAMETER_src_sw_offset 2 +#define SDMA_PKT_COPY_LINEAR_PARAMETER_src_sw_mask 0x00000003 +#define SDMA_PKT_COPY_LINEAR_PARAMETER_src_sw_shift 24 +#define SDMA_PKT_COPY_LINEAR_PARAMETER_SRC_SW(x) (((x) & SDMA_PKT_COPY_LINEAR_PARAMETER_src_sw_mask) << SDMA_PKT_COPY_LINEAR_PARAMETER_src_sw_shift) + +/*define for src_ha field*/ +#define SDMA_PKT_COPY_LINEAR_PARAMETER_src_ha_offset 2 +#define SDMA_PKT_COPY_LINEAR_PARAMETER_src_ha_mask 0x00000001 +#define SDMA_PKT_COPY_LINEAR_PARAMETER_src_ha_shift 30 +#define SDMA_PKT_COPY_LINEAR_PARAMETER_SRC_HA(x) (((x) & SDMA_PKT_COPY_LINEAR_PARAMETER_src_ha_mask) << SDMA_PKT_COPY_LINEAR_PARAMETER_src_ha_shift) + +/*define for SRC_ADDR_LO word*/ +/*define for src_addr_31_0 field*/ +#define SDMA_PKT_COPY_LINEAR_SRC_ADDR_LO_src_addr_31_0_offset 3 +#define SDMA_PKT_COPY_LINEAR_SRC_ADDR_LO_src_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_LINEAR_SRC_ADDR_LO_src_addr_31_0_shift 0 +#define SDMA_PKT_COPY_LINEAR_SRC_ADDR_LO_SRC_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_LINEAR_SRC_ADDR_LO_src_addr_31_0_mask) << SDMA_PKT_COPY_LINEAR_SRC_ADDR_LO_src_addr_31_0_shift) + +/*define for SRC_ADDR_HI word*/ +/*define for src_addr_63_32 field*/ +#define SDMA_PKT_COPY_LINEAR_SRC_ADDR_HI_src_addr_63_32_offset 4 +#define SDMA_PKT_COPY_LINEAR_SRC_ADDR_HI_src_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_LINEAR_SRC_ADDR_HI_src_addr_63_32_shift 0 +#define SDMA_PKT_COPY_LINEAR_SRC_ADDR_HI_SRC_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_LINEAR_SRC_ADDR_HI_src_addr_63_32_mask) << SDMA_PKT_COPY_LINEAR_SRC_ADDR_HI_src_addr_63_32_shift) + +/*define for DST_ADDR_LO word*/ +/*define for dst_addr_31_0 field*/ +#define SDMA_PKT_COPY_LINEAR_DST_ADDR_LO_dst_addr_31_0_offset 5 +#define SDMA_PKT_COPY_LINEAR_DST_ADDR_LO_dst_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_LINEAR_DST_ADDR_LO_dst_addr_31_0_shift 0 +#define SDMA_PKT_COPY_LINEAR_DST_ADDR_LO_DST_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_LINEAR_DST_ADDR_LO_dst_addr_31_0_mask) << SDMA_PKT_COPY_LINEAR_DST_ADDR_LO_dst_addr_31_0_shift) + +/*define for DST_ADDR_HI word*/ +/*define for dst_addr_63_32 field*/ +#define SDMA_PKT_COPY_LINEAR_DST_ADDR_HI_dst_addr_63_32_offset 6 +#define SDMA_PKT_COPY_LINEAR_DST_ADDR_HI_dst_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_LINEAR_DST_ADDR_HI_dst_addr_63_32_shift 0 +#define SDMA_PKT_COPY_LINEAR_DST_ADDR_HI_DST_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_LINEAR_DST_ADDR_HI_dst_addr_63_32_mask) << SDMA_PKT_COPY_LINEAR_DST_ADDR_HI_dst_addr_63_32_shift) + + +/* +** Definitions for SDMA_PKT_COPY_BROADCAST_LINEAR packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_op_offset 0 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_op_mask 0x000000FF +#define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_op_shift 0 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_OP(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_op_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_sub_op_offset 0 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_sub_op_shift 8 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_sub_op_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_sub_op_shift) + +/*define for broadcast field*/ +#define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_broadcast_offset 0 +#define 
SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_broadcast_mask 0x00000001 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_broadcast_shift 27 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_BROADCAST(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_broadcast_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_broadcast_shift) + +/*define for COUNT word*/ +/*define for count field*/ +#define SDMA_PKT_COPY_BROADCAST_LINEAR_COUNT_count_offset 1 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_COUNT_count_mask 0x003FFFFF +#define SDMA_PKT_COPY_BROADCAST_LINEAR_COUNT_count_shift 0 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_COUNT_COUNT(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_COUNT_count_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_COUNT_count_shift) + +/*define for PARAMETER word*/ +/*define for dst2_sw field*/ +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_sw_offset 2 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_sw_mask 0x00000003 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_sw_shift 8 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_DST2_SW(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_sw_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_sw_shift) + +/*define for dst2_ha field*/ +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_ha_offset 2 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_ha_mask 0x00000001 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_ha_shift 14 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_DST2_HA(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_ha_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_ha_shift) + +/*define for dst1_sw field*/ +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_sw_offset 2 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_sw_mask 0x00000003 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_sw_shift 16 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_DST1_SW(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_sw_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_sw_shift) + +/*define for dst1_ha field*/ +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_ha_offset 2 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_ha_mask 0x00000001 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_ha_shift 22 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_DST1_HA(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_ha_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_ha_shift) + +/*define for src_sw field*/ +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_sw_offset 2 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_sw_mask 0x00000003 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_sw_shift 24 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_SRC_SW(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_sw_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_sw_shift) + +/*define for src_ha field*/ +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_ha_offset 2 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_ha_mask 0x00000001 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_ha_shift 30 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_SRC_HA(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_ha_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_ha_shift) + +/*define for SRC_ADDR_LO word*/ +/*define for src_addr_31_0 field*/ +#define SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_LO_src_addr_31_0_offset 3 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_LO_src_addr_31_0_mask 0xFFFFFFFF +#define 
SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_LO_src_addr_31_0_shift 0 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_LO_SRC_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_LO_src_addr_31_0_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_LO_src_addr_31_0_shift) + +/*define for SRC_ADDR_HI word*/ +/*define for src_addr_63_32 field*/ +#define SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_HI_src_addr_63_32_offset 4 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_HI_src_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_HI_src_addr_63_32_shift 0 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_HI_SRC_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_HI_src_addr_63_32_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_HI_src_addr_63_32_shift) + +/*define for DST1_ADDR_LO word*/ +/*define for dst1_addr_31_0 field*/ +#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_LO_dst1_addr_31_0_offset 5 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_LO_dst1_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_LO_dst1_addr_31_0_shift 0 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_LO_DST1_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_LO_dst1_addr_31_0_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_LO_dst1_addr_31_0_shift) + +/*define for DST1_ADDR_HI word*/ +/*define for dst1_addr_63_32 field*/ +#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_HI_dst1_addr_63_32_offset 6 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_HI_dst1_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_HI_dst1_addr_63_32_shift 0 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_HI_DST1_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_HI_dst1_addr_63_32_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_HI_dst1_addr_63_32_shift) + +/*define for DST2_ADDR_LO word*/ +/*define for dst2_addr_31_0 field*/ +#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_LO_dst2_addr_31_0_offset 7 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_LO_dst2_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_LO_dst2_addr_31_0_shift 0 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_LO_DST2_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_LO_dst2_addr_31_0_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_LO_dst2_addr_31_0_shift) + +/*define for DST2_ADDR_HI word*/ +/*define for dst2_addr_63_32 field*/ +#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_HI_dst2_addr_63_32_offset 8 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_HI_dst2_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_HI_dst2_addr_63_32_shift 0 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_HI_DST2_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_HI_dst2_addr_63_32_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_HI_dst2_addr_63_32_shift) + + +/* +** Definitions for SDMA_PKT_COPY_LINEAR_SUBWIN packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_op_offset 0 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_op_mask 0x000000FF +#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_op_shift 0 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_OP(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_op_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_sub_op_offset 0 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_sub_op_shift 
8 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_sub_op_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_sub_op_shift) + +/*define for elementsize field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_elementsize_offset 0 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_elementsize_mask 0x00000007 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_elementsize_shift 29 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_ELEMENTSIZE(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_elementsize_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_elementsize_shift) + +/*define for SRC_ADDR_LO word*/ +/*define for src_addr_31_0 field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_LO_src_addr_31_0_offset 1 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_LO_src_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_LO_src_addr_31_0_shift 0 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_LO_SRC_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_LO_src_addr_31_0_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_LO_src_addr_31_0_shift) + +/*define for SRC_ADDR_HI word*/ +/*define for src_addr_63_32 field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_HI_src_addr_63_32_offset 2 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_HI_src_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_HI_src_addr_63_32_shift 0 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_HI_SRC_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_HI_src_addr_63_32_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_HI_src_addr_63_32_shift) + +/*define for DW_3 word*/ +/*define for src_x field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_x_offset 3 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_x_mask 0x00003FFF +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_x_shift 0 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_SRC_X(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_x_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_x_shift) + +/*define for src_y field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_y_offset 3 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_y_mask 0x00003FFF +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_y_shift 16 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_SRC_Y(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_y_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_y_shift) + +/*define for DW_4 word*/ +/*define for src_z field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_z_offset 4 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_z_mask 0x000007FF +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_z_shift 0 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_SRC_Z(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_z_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_z_shift) + +/*define for src_pitch field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_pitch_offset 4 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_pitch_mask 0x00003FFF +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_pitch_shift 16 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_SRC_PITCH(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_pitch_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_pitch_shift) + +/*define for DW_5 word*/ +/*define for src_slice_pitch field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_5_src_slice_pitch_offset 5 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_5_src_slice_pitch_mask 0x0FFFFFFF +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_5_src_slice_pitch_shift 0 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_5_SRC_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_5_src_slice_pitch_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_5_src_slice_pitch_shift) + 
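These offset/mask/shift triples and the FIELD(x) helpers are meant to be OR'd together into the packet dwords of an indirect buffer: the *_offset constant is the dword index inside the packet and FIELD(x) places a value at its bit position. As a rough sketch of that usage (not taken from this patch; the helper name, the ib pointer and the u32 type from <linux/types.h> are assumptions), the sub-window source words above could be packed like this:

    /*
     * Minimal sketch, not part of this patch: packing the source-window
     * dwords of a COPY_LINEAR_SUBWIN packet.  "ib" is assumed to point at
     * the first dword of the packet inside an indirect buffer; only the
     * field macros come from this header.
     */
    static void sdma_subwin_pack_src(u32 *ib, u32 src_x, u32 src_y,
                                     u32 src_z, u32 src_pitch,
                                     u32 src_slice_pitch)
    {
        /* each *_offset constant is the dword index within the packet */
        ib[SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_x_offset] =
            SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_SRC_X(src_x) |
            SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_SRC_Y(src_y);
        ib[SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_z_offset] =
            SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_SRC_Z(src_z) |
            SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_SRC_PITCH(src_pitch);
        ib[SDMA_PKT_COPY_LINEAR_SUBWIN_DW_5_src_slice_pitch_offset] =
            SDMA_PKT_COPY_LINEAR_SUBWIN_DW_5_SRC_SLICE_PITCH(src_slice_pitch);
    }
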
+/*define for DST_ADDR_LO word*/ +/*define for dst_addr_31_0 field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_LO_dst_addr_31_0_offset 6 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_LO_dst_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_LO_dst_addr_31_0_shift 0 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_LO_DST_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_LO_dst_addr_31_0_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_LO_dst_addr_31_0_shift) + +/*define for DST_ADDR_HI word*/ +/*define for dst_addr_63_32 field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_HI_dst_addr_63_32_offset 7 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_HI_dst_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_HI_dst_addr_63_32_shift 0 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_HI_DST_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_HI_dst_addr_63_32_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_HI_dst_addr_63_32_shift) + +/*define for DW_8 word*/ +/*define for dst_x field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_x_offset 8 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_x_mask 0x00003FFF +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_x_shift 0 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_DST_X(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_x_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_x_shift) + +/*define for dst_y field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_y_offset 8 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_y_mask 0x00003FFF +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_y_shift 16 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_DST_Y(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_y_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_y_shift) + +/*define for DW_9 word*/ +/*define for dst_z field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_z_offset 9 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_z_mask 0x000007FF +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_z_shift 0 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_DST_Z(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_z_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_z_shift) + +/*define for dst_pitch field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_pitch_offset 9 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_pitch_mask 0x00003FFF +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_pitch_shift 16 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_DST_PITCH(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_pitch_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_pitch_shift) + +/*define for DW_10 word*/ +/*define for dst_slice_pitch field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_10_dst_slice_pitch_offset 10 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_10_dst_slice_pitch_mask 0x0FFFFFFF +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_10_dst_slice_pitch_shift 0 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_10_DST_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_10_dst_slice_pitch_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_10_dst_slice_pitch_shift) + +/*define for DW_11 word*/ +/*define for rect_x field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_x_offset 11 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_x_mask 0x00003FFF +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_x_shift 0 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_RECT_X(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_x_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_x_shift) + +/*define for rect_y field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_y_offset 11 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_y_mask 0x00003FFF +#define 
SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_y_shift 16 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_RECT_Y(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_y_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_y_shift) + +/*define for DW_12 word*/ +/*define for rect_z field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_rect_z_offset 12 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_rect_z_mask 0x000007FF +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_rect_z_shift 0 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_RECT_Z(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_rect_z_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_rect_z_shift) + +/*define for dst_sw field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_sw_offset 12 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_sw_mask 0x00000003 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_sw_shift 16 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_DST_SW(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_sw_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_sw_shift) + +/*define for dst_ha field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_ha_offset 12 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_ha_mask 0x00000001 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_ha_shift 22 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_DST_HA(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_ha_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_ha_shift) + +/*define for src_sw field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_sw_offset 12 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_sw_mask 0x00000003 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_sw_shift 24 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_SRC_SW(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_sw_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_sw_shift) + +/*define for src_ha field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_ha_offset 12 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_ha_mask 0x00000001 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_ha_shift 30 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_SRC_HA(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_ha_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_ha_shift) + + +/* +** Definitions for SDMA_PKT_COPY_TILED packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_COPY_TILED_HEADER_op_offset 0 +#define SDMA_PKT_COPY_TILED_HEADER_op_mask 0x000000FF +#define SDMA_PKT_COPY_TILED_HEADER_op_shift 0 +#define SDMA_PKT_COPY_TILED_HEADER_OP(x) (((x) & SDMA_PKT_COPY_TILED_HEADER_op_mask) << SDMA_PKT_COPY_TILED_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_COPY_TILED_HEADER_sub_op_offset 0 +#define SDMA_PKT_COPY_TILED_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_COPY_TILED_HEADER_sub_op_shift 8 +#define SDMA_PKT_COPY_TILED_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_TILED_HEADER_sub_op_mask) << SDMA_PKT_COPY_TILED_HEADER_sub_op_shift) + +/*define for detile field*/ +#define SDMA_PKT_COPY_TILED_HEADER_detile_offset 0 +#define SDMA_PKT_COPY_TILED_HEADER_detile_mask 0x00000001 +#define SDMA_PKT_COPY_TILED_HEADER_detile_shift 31 +#define SDMA_PKT_COPY_TILED_HEADER_DETILE(x) (((x) & SDMA_PKT_COPY_TILED_HEADER_detile_mask) << SDMA_PKT_COPY_TILED_HEADER_detile_shift) + +/*define for TILED_ADDR_LO word*/ +/*define for tiled_addr_31_0 field*/ +#define SDMA_PKT_COPY_TILED_TILED_ADDR_LO_tiled_addr_31_0_offset 1 +#define SDMA_PKT_COPY_TILED_TILED_ADDR_LO_tiled_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_TILED_TILED_ADDR_LO_tiled_addr_31_0_shift 0 +#define SDMA_PKT_COPY_TILED_TILED_ADDR_LO_TILED_ADDR_31_0(x) (((x) & 
SDMA_PKT_COPY_TILED_TILED_ADDR_LO_tiled_addr_31_0_mask) << SDMA_PKT_COPY_TILED_TILED_ADDR_LO_tiled_addr_31_0_shift) + +/*define for TILED_ADDR_HI word*/ +/*define for tiled_addr_63_32 field*/ +#define SDMA_PKT_COPY_TILED_TILED_ADDR_HI_tiled_addr_63_32_offset 2 +#define SDMA_PKT_COPY_TILED_TILED_ADDR_HI_tiled_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_TILED_TILED_ADDR_HI_tiled_addr_63_32_shift 0 +#define SDMA_PKT_COPY_TILED_TILED_ADDR_HI_TILED_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_TILED_TILED_ADDR_HI_tiled_addr_63_32_mask) << SDMA_PKT_COPY_TILED_TILED_ADDR_HI_tiled_addr_63_32_shift) + +/*define for DW_3 word*/ +/*define for pitch_in_tile field*/ +#define SDMA_PKT_COPY_TILED_DW_3_pitch_in_tile_offset 3 +#define SDMA_PKT_COPY_TILED_DW_3_pitch_in_tile_mask 0x000007FF +#define SDMA_PKT_COPY_TILED_DW_3_pitch_in_tile_shift 0 +#define SDMA_PKT_COPY_TILED_DW_3_PITCH_IN_TILE(x) (((x) & SDMA_PKT_COPY_TILED_DW_3_pitch_in_tile_mask) << SDMA_PKT_COPY_TILED_DW_3_pitch_in_tile_shift) + +/*define for height field*/ +#define SDMA_PKT_COPY_TILED_DW_3_height_offset 3 +#define SDMA_PKT_COPY_TILED_DW_3_height_mask 0x00003FFF +#define SDMA_PKT_COPY_TILED_DW_3_height_shift 16 +#define SDMA_PKT_COPY_TILED_DW_3_HEIGHT(x) (((x) & SDMA_PKT_COPY_TILED_DW_3_height_mask) << SDMA_PKT_COPY_TILED_DW_3_height_shift) + +/*define for DW_4 word*/ +/*define for slice_pitch field*/ +#define SDMA_PKT_COPY_TILED_DW_4_slice_pitch_offset 4 +#define SDMA_PKT_COPY_TILED_DW_4_slice_pitch_mask 0x003FFFFF +#define SDMA_PKT_COPY_TILED_DW_4_slice_pitch_shift 0 +#define SDMA_PKT_COPY_TILED_DW_4_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_TILED_DW_4_slice_pitch_mask) << SDMA_PKT_COPY_TILED_DW_4_slice_pitch_shift) + +/*define for DW_5 word*/ +/*define for element_size field*/ +#define SDMA_PKT_COPY_TILED_DW_5_element_size_offset 5 +#define SDMA_PKT_COPY_TILED_DW_5_element_size_mask 0x00000007 +#define SDMA_PKT_COPY_TILED_DW_5_element_size_shift 0 +#define SDMA_PKT_COPY_TILED_DW_5_ELEMENT_SIZE(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_element_size_mask) << SDMA_PKT_COPY_TILED_DW_5_element_size_shift) + +/*define for array_mode field*/ +#define SDMA_PKT_COPY_TILED_DW_5_array_mode_offset 5 +#define SDMA_PKT_COPY_TILED_DW_5_array_mode_mask 0x0000000F +#define SDMA_PKT_COPY_TILED_DW_5_array_mode_shift 3 +#define SDMA_PKT_COPY_TILED_DW_5_ARRAY_MODE(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_array_mode_mask) << SDMA_PKT_COPY_TILED_DW_5_array_mode_shift) + +/*define for mit_mode field*/ +#define SDMA_PKT_COPY_TILED_DW_5_mit_mode_offset 5 +#define SDMA_PKT_COPY_TILED_DW_5_mit_mode_mask 0x00000007 +#define SDMA_PKT_COPY_TILED_DW_5_mit_mode_shift 8 +#define SDMA_PKT_COPY_TILED_DW_5_MIT_MODE(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_mit_mode_mask) << SDMA_PKT_COPY_TILED_DW_5_mit_mode_shift) + +/*define for tilesplit_size field*/ +#define SDMA_PKT_COPY_TILED_DW_5_tilesplit_size_offset 5 +#define SDMA_PKT_COPY_TILED_DW_5_tilesplit_size_mask 0x00000007 +#define SDMA_PKT_COPY_TILED_DW_5_tilesplit_size_shift 11 +#define SDMA_PKT_COPY_TILED_DW_5_TILESPLIT_SIZE(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_tilesplit_size_mask) << SDMA_PKT_COPY_TILED_DW_5_tilesplit_size_shift) + +/*define for bank_w field*/ +#define SDMA_PKT_COPY_TILED_DW_5_bank_w_offset 5 +#define SDMA_PKT_COPY_TILED_DW_5_bank_w_mask 0x00000003 +#define SDMA_PKT_COPY_TILED_DW_5_bank_w_shift 15 +#define SDMA_PKT_COPY_TILED_DW_5_BANK_W(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_bank_w_mask) << SDMA_PKT_COPY_TILED_DW_5_bank_w_shift) + +/*define for bank_h field*/ +#define SDMA_PKT_COPY_TILED_DW_5_bank_h_offset 5 +#define 
SDMA_PKT_COPY_TILED_DW_5_bank_h_mask 0x00000003 +#define SDMA_PKT_COPY_TILED_DW_5_bank_h_shift 18 +#define SDMA_PKT_COPY_TILED_DW_5_BANK_H(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_bank_h_mask) << SDMA_PKT_COPY_TILED_DW_5_bank_h_shift) + +/*define for num_bank field*/ +#define SDMA_PKT_COPY_TILED_DW_5_num_bank_offset 5 +#define SDMA_PKT_COPY_TILED_DW_5_num_bank_mask 0x00000003 +#define SDMA_PKT_COPY_TILED_DW_5_num_bank_shift 21 +#define SDMA_PKT_COPY_TILED_DW_5_NUM_BANK(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_num_bank_mask) << SDMA_PKT_COPY_TILED_DW_5_num_bank_shift) + +/*define for mat_aspt field*/ +#define SDMA_PKT_COPY_TILED_DW_5_mat_aspt_offset 5 +#define SDMA_PKT_COPY_TILED_DW_5_mat_aspt_mask 0x00000003 +#define SDMA_PKT_COPY_TILED_DW_5_mat_aspt_shift 24 +#define SDMA_PKT_COPY_TILED_DW_5_MAT_ASPT(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_mat_aspt_mask) << SDMA_PKT_COPY_TILED_DW_5_mat_aspt_shift) + +/*define for pipe_config field*/ +#define SDMA_PKT_COPY_TILED_DW_5_pipe_config_offset 5 +#define SDMA_PKT_COPY_TILED_DW_5_pipe_config_mask 0x0000001F +#define SDMA_PKT_COPY_TILED_DW_5_pipe_config_shift 26 +#define SDMA_PKT_COPY_TILED_DW_5_PIPE_CONFIG(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_pipe_config_mask) << SDMA_PKT_COPY_TILED_DW_5_pipe_config_shift) + +/*define for DW_6 word*/ +/*define for x field*/ +#define SDMA_PKT_COPY_TILED_DW_6_x_offset 6 +#define SDMA_PKT_COPY_TILED_DW_6_x_mask 0x00003FFF +#define SDMA_PKT_COPY_TILED_DW_6_x_shift 0 +#define SDMA_PKT_COPY_TILED_DW_6_X(x) (((x) & SDMA_PKT_COPY_TILED_DW_6_x_mask) << SDMA_PKT_COPY_TILED_DW_6_x_shift) + +/*define for y field*/ +#define SDMA_PKT_COPY_TILED_DW_6_y_offset 6 +#define SDMA_PKT_COPY_TILED_DW_6_y_mask 0x00003FFF +#define SDMA_PKT_COPY_TILED_DW_6_y_shift 16 +#define SDMA_PKT_COPY_TILED_DW_6_Y(x) (((x) & SDMA_PKT_COPY_TILED_DW_6_y_mask) << SDMA_PKT_COPY_TILED_DW_6_y_shift) + +/*define for DW_7 word*/ +/*define for z field*/ +#define SDMA_PKT_COPY_TILED_DW_7_z_offset 7 +#define SDMA_PKT_COPY_TILED_DW_7_z_mask 0x00000FFF +#define SDMA_PKT_COPY_TILED_DW_7_z_shift 0 +#define SDMA_PKT_COPY_TILED_DW_7_Z(x) (((x) & SDMA_PKT_COPY_TILED_DW_7_z_mask) << SDMA_PKT_COPY_TILED_DW_7_z_shift) + +/*define for linear_sw field*/ +#define SDMA_PKT_COPY_TILED_DW_7_linear_sw_offset 7 +#define SDMA_PKT_COPY_TILED_DW_7_linear_sw_mask 0x00000003 +#define SDMA_PKT_COPY_TILED_DW_7_linear_sw_shift 16 +#define SDMA_PKT_COPY_TILED_DW_7_LINEAR_SW(x) (((x) & SDMA_PKT_COPY_TILED_DW_7_linear_sw_mask) << SDMA_PKT_COPY_TILED_DW_7_linear_sw_shift) + +/*define for tile_sw field*/ +#define SDMA_PKT_COPY_TILED_DW_7_tile_sw_offset 7 +#define SDMA_PKT_COPY_TILED_DW_7_tile_sw_mask 0x00000003 +#define SDMA_PKT_COPY_TILED_DW_7_tile_sw_shift 24 +#define SDMA_PKT_COPY_TILED_DW_7_TILE_SW(x) (((x) & SDMA_PKT_COPY_TILED_DW_7_tile_sw_mask) << SDMA_PKT_COPY_TILED_DW_7_tile_sw_shift) + +/*define for LINEAR_ADDR_LO word*/ +/*define for linear_addr_31_0 field*/ +#define SDMA_PKT_COPY_TILED_LINEAR_ADDR_LO_linear_addr_31_0_offset 8 +#define SDMA_PKT_COPY_TILED_LINEAR_ADDR_LO_linear_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_TILED_LINEAR_ADDR_LO_linear_addr_31_0_shift 0 +#define SDMA_PKT_COPY_TILED_LINEAR_ADDR_LO_LINEAR_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_TILED_LINEAR_ADDR_LO_linear_addr_31_0_mask) << SDMA_PKT_COPY_TILED_LINEAR_ADDR_LO_linear_addr_31_0_shift) + +/*define for LINEAR_ADDR_HI word*/ +/*define for linear_addr_63_32 field*/ +#define SDMA_PKT_COPY_TILED_LINEAR_ADDR_HI_linear_addr_63_32_offset 9 +#define SDMA_PKT_COPY_TILED_LINEAR_ADDR_HI_linear_addr_63_32_mask 0xFFFFFFFF +#define 
SDMA_PKT_COPY_TILED_LINEAR_ADDR_HI_linear_addr_63_32_shift 0 +#define SDMA_PKT_COPY_TILED_LINEAR_ADDR_HI_LINEAR_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_TILED_LINEAR_ADDR_HI_linear_addr_63_32_mask) << SDMA_PKT_COPY_TILED_LINEAR_ADDR_HI_linear_addr_63_32_shift) + +/*define for LINEAR_PITCH word*/ +/*define for linear_pitch field*/ +#define SDMA_PKT_COPY_TILED_LINEAR_PITCH_linear_pitch_offset 10 +#define SDMA_PKT_COPY_TILED_LINEAR_PITCH_linear_pitch_mask 0x0007FFFF +#define SDMA_PKT_COPY_TILED_LINEAR_PITCH_linear_pitch_shift 0 +#define SDMA_PKT_COPY_TILED_LINEAR_PITCH_LINEAR_PITCH(x) (((x) & SDMA_PKT_COPY_TILED_LINEAR_PITCH_linear_pitch_mask) << SDMA_PKT_COPY_TILED_LINEAR_PITCH_linear_pitch_shift) + +/*define for COUNT word*/ +/*define for count field*/ +#define SDMA_PKT_COPY_TILED_COUNT_count_offset 11 +#define SDMA_PKT_COPY_TILED_COUNT_count_mask 0x000FFFFF +#define SDMA_PKT_COPY_TILED_COUNT_count_shift 0 +#define SDMA_PKT_COPY_TILED_COUNT_COUNT(x) (((x) & SDMA_PKT_COPY_TILED_COUNT_count_mask) << SDMA_PKT_COPY_TILED_COUNT_count_shift) + + +/* +** Definitions for SDMA_PKT_COPY_L2T_BROADCAST packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_op_offset 0 +#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_op_mask 0x000000FF +#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_op_shift 0 +#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_OP(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_HEADER_op_mask) << SDMA_PKT_COPY_L2T_BROADCAST_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_sub_op_offset 0 +#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_sub_op_shift 8 +#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_HEADER_sub_op_mask) << SDMA_PKT_COPY_L2T_BROADCAST_HEADER_sub_op_shift) + +/*define for videocopy field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_videocopy_offset 0 +#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_videocopy_mask 0x00000001 +#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_videocopy_shift 26 +#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_VIDEOCOPY(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_HEADER_videocopy_mask) << SDMA_PKT_COPY_L2T_BROADCAST_HEADER_videocopy_shift) + +/*define for broadcast field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_broadcast_offset 0 +#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_broadcast_mask 0x00000001 +#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_broadcast_shift 27 +#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_BROADCAST(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_HEADER_broadcast_mask) << SDMA_PKT_COPY_L2T_BROADCAST_HEADER_broadcast_shift) + +/*define for TILED_ADDR_LO_0 word*/ +/*define for tiled_addr0_31_0 field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_0_tiled_addr0_31_0_offset 1 +#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_0_tiled_addr0_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_0_tiled_addr0_31_0_shift 0 +#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_0_TILED_ADDR0_31_0(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_0_tiled_addr0_31_0_mask) << SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_0_tiled_addr0_31_0_shift) + +/*define for TILED_ADDR_HI_0 word*/ +/*define for tiled_addr0_63_32 field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_0_tiled_addr0_63_32_offset 2 +#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_0_tiled_addr0_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_0_tiled_addr0_63_32_shift 0 +#define 
SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_0_TILED_ADDR0_63_32(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_0_tiled_addr0_63_32_mask) << SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_0_tiled_addr0_63_32_shift) + +/*define for TILED_ADDR_LO_1 word*/ +/*define for tiled_addr1_31_0 field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_1_tiled_addr1_31_0_offset 3 +#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_1_tiled_addr1_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_1_tiled_addr1_31_0_shift 0 +#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_1_TILED_ADDR1_31_0(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_1_tiled_addr1_31_0_mask) << SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_1_tiled_addr1_31_0_shift) + +/*define for TILED_ADDR_HI_1 word*/ +/*define for tiled_addr1_63_32 field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_1_tiled_addr1_63_32_offset 4 +#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_1_tiled_addr1_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_1_tiled_addr1_63_32_shift 0 +#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_1_TILED_ADDR1_63_32(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_1_tiled_addr1_63_32_mask) << SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_1_tiled_addr1_63_32_shift) + +/*define for DW_5 word*/ +/*define for pitch_in_tile field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_pitch_in_tile_offset 5 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_pitch_in_tile_mask 0x000007FF +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_pitch_in_tile_shift 0 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_PITCH_IN_TILE(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_5_pitch_in_tile_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_5_pitch_in_tile_shift) + +/*define for height field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_height_offset 5 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_height_mask 0x00003FFF +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_height_shift 16 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_HEIGHT(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_5_height_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_5_height_shift) + +/*define for DW_6 word*/ +/*define for slice_pitch field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_6_slice_pitch_offset 6 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_6_slice_pitch_mask 0x003FFFFF +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_6_slice_pitch_shift 0 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_6_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_6_slice_pitch_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_6_slice_pitch_shift) + +/*define for DW_7 word*/ +/*define for element_size field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_element_size_offset 7 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_element_size_mask 0x00000007 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_element_size_shift 0 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_ELEMENT_SIZE(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_element_size_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_element_size_shift) + +/*define for array_mode field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_array_mode_offset 7 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_array_mode_mask 0x0000000F +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_array_mode_shift 3 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_ARRAY_MODE(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_array_mode_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_array_mode_shift) + +/*define for mit_mode field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mit_mode_offset 7 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mit_mode_mask 0x00000007 +#define 
SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mit_mode_shift 8 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_MIT_MODE(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mit_mode_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mit_mode_shift) + +/*define for tilesplit_size field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_tilesplit_size_offset 7 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_tilesplit_size_mask 0x00000007 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_tilesplit_size_shift 11 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_TILESPLIT_SIZE(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_tilesplit_size_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_tilesplit_size_shift) + +/*define for bank_w field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_w_offset 7 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_w_mask 0x00000003 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_w_shift 15 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_BANK_W(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_w_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_w_shift) + +/*define for bank_h field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_h_offset 7 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_h_mask 0x00000003 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_h_shift 18 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_BANK_H(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_h_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_h_shift) + +/*define for num_bank field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_num_bank_offset 7 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_num_bank_mask 0x00000003 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_num_bank_shift 21 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_NUM_BANK(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_num_bank_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_num_bank_shift) + +/*define for mat_aspt field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mat_aspt_offset 7 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mat_aspt_mask 0x00000003 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mat_aspt_shift 24 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_MAT_ASPT(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mat_aspt_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mat_aspt_shift) + +/*define for pipe_config field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_pipe_config_offset 7 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_pipe_config_mask 0x0000001F +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_pipe_config_shift 26 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_PIPE_CONFIG(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_pipe_config_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_pipe_config_shift) + +/*define for DW_8 word*/ +/*define for x field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_x_offset 8 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_x_mask 0x00003FFF +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_x_shift 0 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_X(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_8_x_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_8_x_shift) + +/*define for y field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_y_offset 8 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_y_mask 0x00003FFF +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_y_shift 16 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_Y(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_8_y_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_8_y_shift) + +/*define for DW_9 word*/ +/*define for z field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_9_z_offset 9 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_9_z_mask 0x00000FFF +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_9_z_shift 0 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_9_Z(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_9_z_mask) << 
SDMA_PKT_COPY_L2T_BROADCAST_DW_9_z_shift)
+
+/*define for DW_10 word*/
+/*define for dst2_sw field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_sw_offset 10
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_sw_mask 0x00000003
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_sw_shift 8
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_DST2_SW(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_sw_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_sw_shift)
+
+/*define for dst2_ha field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_ha_offset 10
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_ha_mask 0x00000001
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_ha_shift 14
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_DST2_HA(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_ha_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_ha_shift)
+
+/*define for linear_sw field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_linear_sw_offset 10
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_linear_sw_mask 0x00000003
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_linear_sw_shift 16
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_LINEAR_SW(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_10_linear_sw_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_10_linear_sw_shift)
+
+/*define for tile_sw field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_tile_sw_offset 10
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_tile_sw_mask 0x00000003
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_tile_sw_shift 24
+#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_TILE_SW(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_10_tile_sw_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_10_tile_sw_shift)
+
+/*define for LINEAR_ADDR_LO word*/
+/*define for linear_addr_31_0 field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_LO_linear_addr_31_0_offset 11
+#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_LO_linear_addr_31_0_mask 0xFFFFFFFF
+#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_LO_linear_addr_31_0_shift 0
+#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_LO_LINEAR_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_LO_linear_addr_31_0_mask) << SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_LO_linear_addr_31_0_shift)
+
+/*define for LINEAR_ADDR_HI word*/
+/*define for linear_addr_63_32 field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_HI_linear_addr_63_32_offset 12
+#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_HI_linear_addr_63_32_mask 0xFFFFFFFF
+#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_HI_linear_addr_63_32_shift 0
+#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_HI_LINEAR_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_HI_linear_addr_63_32_mask) << SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_HI_linear_addr_63_32_shift)
+
+/*define for LINEAR_PITCH word*/
+/*define for linear_pitch field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_PITCH_linear_pitch_offset 13
+#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_PITCH_linear_pitch_mask 0x0007FFFF
+#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_PITCH_linear_pitch_shift 0
+#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_PITCH_LINEAR_PITCH(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_PITCH_linear_pitch_mask) << SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_PITCH_linear_pitch_shift)
+
+/*define for COUNT word*/
+/*define for count field*/
+#define SDMA_PKT_COPY_L2T_BROADCAST_COUNT_count_offset 14
+#define SDMA_PKT_COPY_L2T_BROADCAST_COUNT_count_mask 0x000FFFFF
+#define SDMA_PKT_COPY_L2T_BROADCAST_COUNT_count_shift 0
+#define SDMA_PKT_COPY_L2T_BROADCAST_COUNT_COUNT(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_COUNT_count_mask) << SDMA_PKT_COPY_L2T_BROADCAST_COUNT_count_shift)
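The same constants work in the other direction: a parser or debugfs dump can shift and mask a captured dword to recover a field. A rough sketch for the DW_10 swizzle fields of the L2T broadcast copy packet defined above (again not from this patch; the helper names and the dw10 argument are made up, u32 is the kernel type):

    /*
     * Minimal sketch, not part of this patch: recovering fields from a
     * captured DW_10 of a COPY_L2T_BROADCAST packet, e.g. in an IB dump.
     * Only the mask/shift constants come from this header.
     */
    static u32 l2t_broadcast_dw10_linear_sw(u32 dw10)
    {
        return (dw10 >> SDMA_PKT_COPY_L2T_BROADCAST_DW_10_linear_sw_shift) &
               SDMA_PKT_COPY_L2T_BROADCAST_DW_10_linear_sw_mask;
    }

    static u32 l2t_broadcast_dw10_tile_sw(u32 dw10)
    {
        return (dw10 >> SDMA_PKT_COPY_L2T_BROADCAST_DW_10_tile_sw_shift) &
               SDMA_PKT_COPY_L2T_BROADCAST_DW_10_tile_sw_mask;
    }
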
+
+
+/*
+** Definitions for SDMA_PKT_COPY_T2T packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_COPY_T2T_HEADER_op_offset 0
+#define SDMA_PKT_COPY_T2T_HEADER_op_mask 0x000000FF
+#define SDMA_PKT_COPY_T2T_HEADER_op_shift 0
+#define SDMA_PKT_COPY_T2T_HEADER_OP(x) (((x) & SDMA_PKT_COPY_T2T_HEADER_op_mask) << SDMA_PKT_COPY_T2T_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_COPY_T2T_HEADER_sub_op_offset 0
+#define SDMA_PKT_COPY_T2T_HEADER_sub_op_mask 0x000000FF
+#define SDMA_PKT_COPY_T2T_HEADER_sub_op_shift 8
+#define SDMA_PKT_COPY_T2T_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_T2T_HEADER_sub_op_mask) << SDMA_PKT_COPY_T2T_HEADER_sub_op_shift)
+
+/*define for SRC_ADDR_LO word*/
+/*define for src_addr_31_0 field*/
+#define SDMA_PKT_COPY_T2T_SRC_ADDR_LO_src_addr_31_0_offset 1
+#define SDMA_PKT_COPY_T2T_SRC_ADDR_LO_src_addr_31_0_mask 0xFFFFFFFF
+#define SDMA_PKT_COPY_T2T_SRC_ADDR_LO_src_addr_31_0_shift 0
+#define SDMA_PKT_COPY_T2T_SRC_ADDR_LO_SRC_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_T2T_SRC_ADDR_LO_src_addr_31_0_mask) << SDMA_PKT_COPY_T2T_SRC_ADDR_LO_src_addr_31_0_shift)
+
+/*define for SRC_ADDR_HI word*/
+/*define for src_addr_63_32 field*/
+#define SDMA_PKT_COPY_T2T_SRC_ADDR_HI_src_addr_63_32_offset 2
+#define SDMA_PKT_COPY_T2T_SRC_ADDR_HI_src_addr_63_32_mask 0xFFFFFFFF
+#define SDMA_PKT_COPY_T2T_SRC_ADDR_HI_src_addr_63_32_shift 0
+#define SDMA_PKT_COPY_T2T_SRC_ADDR_HI_SRC_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_T2T_SRC_ADDR_HI_src_addr_63_32_mask) << SDMA_PKT_COPY_T2T_SRC_ADDR_HI_src_addr_63_32_shift)
+
+/*define for DW_3 word*/
+/*define for src_x field*/
+#define SDMA_PKT_COPY_T2T_DW_3_src_x_offset 3
+#define SDMA_PKT_COPY_T2T_DW_3_src_x_mask 0x00003FFF
+#define SDMA_PKT_COPY_T2T_DW_3_src_x_shift 0
+#define SDMA_PKT_COPY_T2T_DW_3_SRC_X(x) (((x) & SDMA_PKT_COPY_T2T_DW_3_src_x_mask) << SDMA_PKT_COPY_T2T_DW_3_src_x_shift)
+
+/*define for src_y field*/
+#define SDMA_PKT_COPY_T2T_DW_3_src_y_offset 3
+#define SDMA_PKT_COPY_T2T_DW_3_src_y_mask 0x00003FFF
+#define SDMA_PKT_COPY_T2T_DW_3_src_y_shift 16
+#define SDMA_PKT_COPY_T2T_DW_3_SRC_Y(x) (((x) & SDMA_PKT_COPY_T2T_DW_3_src_y_mask) << SDMA_PKT_COPY_T2T_DW_3_src_y_shift)
+
+/*define for DW_4 word*/
+/*define for src_z field*/
+#define SDMA_PKT_COPY_T2T_DW_4_src_z_offset 4
+#define SDMA_PKT_COPY_T2T_DW_4_src_z_mask 0x000007FF
+#define SDMA_PKT_COPY_T2T_DW_4_src_z_shift 0
+#define SDMA_PKT_COPY_T2T_DW_4_SRC_Z(x) (((x) & SDMA_PKT_COPY_T2T_DW_4_src_z_mask) << SDMA_PKT_COPY_T2T_DW_4_src_z_shift)
+
+/*define for src_pitch_in_tile field*/
+#define SDMA_PKT_COPY_T2T_DW_4_src_pitch_in_tile_offset 4
+#define SDMA_PKT_COPY_T2T_DW_4_src_pitch_in_tile_mask 0x00000FFF
+#define SDMA_PKT_COPY_T2T_DW_4_src_pitch_in_tile_shift 16
+#define SDMA_PKT_COPY_T2T_DW_4_SRC_PITCH_IN_TILE(x) (((x) & SDMA_PKT_COPY_T2T_DW_4_src_pitch_in_tile_mask) << SDMA_PKT_COPY_T2T_DW_4_src_pitch_in_tile_shift)
+
+/*define for DW_5 word*/
+/*define for src_slice_pitch field*/
+#define SDMA_PKT_COPY_T2T_DW_5_src_slice_pitch_offset 5
+#define SDMA_PKT_COPY_T2T_DW_5_src_slice_pitch_mask 0x003FFFFF
+#define SDMA_PKT_COPY_T2T_DW_5_src_slice_pitch_shift 0
+#define SDMA_PKT_COPY_T2T_DW_5_SRC_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_T2T_DW_5_src_slice_pitch_mask) << SDMA_PKT_COPY_T2T_DW_5_src_slice_pitch_shift)
+
+/*define for DW_6 word*/
+/*define for src_element_size field*/
+#define SDMA_PKT_COPY_T2T_DW_6_src_element_size_offset 6
+#define SDMA_PKT_COPY_T2T_DW_6_src_element_size_mask 0x00000007
+#define 
SDMA_PKT_COPY_T2T_DW_6_src_element_size_shift 0 +#define SDMA_PKT_COPY_T2T_DW_6_SRC_ELEMENT_SIZE(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_element_size_mask) << SDMA_PKT_COPY_T2T_DW_6_src_element_size_shift) + +/*define for src_array_mode field*/ +#define SDMA_PKT_COPY_T2T_DW_6_src_array_mode_offset 6 +#define SDMA_PKT_COPY_T2T_DW_6_src_array_mode_mask 0x0000000F +#define SDMA_PKT_COPY_T2T_DW_6_src_array_mode_shift 3 +#define SDMA_PKT_COPY_T2T_DW_6_SRC_ARRAY_MODE(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_array_mode_mask) << SDMA_PKT_COPY_T2T_DW_6_src_array_mode_shift) + +/*define for src_mit_mode field*/ +#define SDMA_PKT_COPY_T2T_DW_6_src_mit_mode_offset 6 +#define SDMA_PKT_COPY_T2T_DW_6_src_mit_mode_mask 0x00000007 +#define SDMA_PKT_COPY_T2T_DW_6_src_mit_mode_shift 8 +#define SDMA_PKT_COPY_T2T_DW_6_SRC_MIT_MODE(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_mit_mode_mask) << SDMA_PKT_COPY_T2T_DW_6_src_mit_mode_shift) + +/*define for src_tilesplit_size field*/ +#define SDMA_PKT_COPY_T2T_DW_6_src_tilesplit_size_offset 6 +#define SDMA_PKT_COPY_T2T_DW_6_src_tilesplit_size_mask 0x00000007 +#define SDMA_PKT_COPY_T2T_DW_6_src_tilesplit_size_shift 11 +#define SDMA_PKT_COPY_T2T_DW_6_SRC_TILESPLIT_SIZE(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_tilesplit_size_mask) << SDMA_PKT_COPY_T2T_DW_6_src_tilesplit_size_shift) + +/*define for src_bank_w field*/ +#define SDMA_PKT_COPY_T2T_DW_6_src_bank_w_offset 6 +#define SDMA_PKT_COPY_T2T_DW_6_src_bank_w_mask 0x00000003 +#define SDMA_PKT_COPY_T2T_DW_6_src_bank_w_shift 15 +#define SDMA_PKT_COPY_T2T_DW_6_SRC_BANK_W(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_bank_w_mask) << SDMA_PKT_COPY_T2T_DW_6_src_bank_w_shift) + +/*define for src_bank_h field*/ +#define SDMA_PKT_COPY_T2T_DW_6_src_bank_h_offset 6 +#define SDMA_PKT_COPY_T2T_DW_6_src_bank_h_mask 0x00000003 +#define SDMA_PKT_COPY_T2T_DW_6_src_bank_h_shift 18 +#define SDMA_PKT_COPY_T2T_DW_6_SRC_BANK_H(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_bank_h_mask) << SDMA_PKT_COPY_T2T_DW_6_src_bank_h_shift) + +/*define for src_num_bank field*/ +#define SDMA_PKT_COPY_T2T_DW_6_src_num_bank_offset 6 +#define SDMA_PKT_COPY_T2T_DW_6_src_num_bank_mask 0x00000003 +#define SDMA_PKT_COPY_T2T_DW_6_src_num_bank_shift 21 +#define SDMA_PKT_COPY_T2T_DW_6_SRC_NUM_BANK(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_num_bank_mask) << SDMA_PKT_COPY_T2T_DW_6_src_num_bank_shift) + +/*define for src_mat_aspt field*/ +#define SDMA_PKT_COPY_T2T_DW_6_src_mat_aspt_offset 6 +#define SDMA_PKT_COPY_T2T_DW_6_src_mat_aspt_mask 0x00000003 +#define SDMA_PKT_COPY_T2T_DW_6_src_mat_aspt_shift 24 +#define SDMA_PKT_COPY_T2T_DW_6_SRC_MAT_ASPT(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_mat_aspt_mask) << SDMA_PKT_COPY_T2T_DW_6_src_mat_aspt_shift) + +/*define for src_pipe_config field*/ +#define SDMA_PKT_COPY_T2T_DW_6_src_pipe_config_offset 6 +#define SDMA_PKT_COPY_T2T_DW_6_src_pipe_config_mask 0x0000001F +#define SDMA_PKT_COPY_T2T_DW_6_src_pipe_config_shift 26 +#define SDMA_PKT_COPY_T2T_DW_6_SRC_PIPE_CONFIG(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_pipe_config_mask) << SDMA_PKT_COPY_T2T_DW_6_src_pipe_config_shift) + +/*define for DST_ADDR_LO word*/ +/*define for dst_addr_31_0 field*/ +#define SDMA_PKT_COPY_T2T_DST_ADDR_LO_dst_addr_31_0_offset 7 +#define SDMA_PKT_COPY_T2T_DST_ADDR_LO_dst_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_T2T_DST_ADDR_LO_dst_addr_31_0_shift 0 +#define SDMA_PKT_COPY_T2T_DST_ADDR_LO_DST_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_T2T_DST_ADDR_LO_dst_addr_31_0_mask) << SDMA_PKT_COPY_T2T_DST_ADDR_LO_dst_addr_31_0_shift) + +/*define for DST_ADDR_HI word*/ +/*define for dst_addr_63_32 field*/ 
+#define SDMA_PKT_COPY_T2T_DST_ADDR_HI_dst_addr_63_32_offset 8 +#define SDMA_PKT_COPY_T2T_DST_ADDR_HI_dst_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_T2T_DST_ADDR_HI_dst_addr_63_32_shift 0 +#define SDMA_PKT_COPY_T2T_DST_ADDR_HI_DST_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_T2T_DST_ADDR_HI_dst_addr_63_32_mask) << SDMA_PKT_COPY_T2T_DST_ADDR_HI_dst_addr_63_32_shift) + +/*define for DW_9 word*/ +/*define for dst_x field*/ +#define SDMA_PKT_COPY_T2T_DW_9_dst_x_offset 9 +#define SDMA_PKT_COPY_T2T_DW_9_dst_x_mask 0x00003FFF +#define SDMA_PKT_COPY_T2T_DW_9_dst_x_shift 0 +#define SDMA_PKT_COPY_T2T_DW_9_DST_X(x) (((x) & SDMA_PKT_COPY_T2T_DW_9_dst_x_mask) << SDMA_PKT_COPY_T2T_DW_9_dst_x_shift) + +/*define for dst_y field*/ +#define SDMA_PKT_COPY_T2T_DW_9_dst_y_offset 9 +#define SDMA_PKT_COPY_T2T_DW_9_dst_y_mask 0x00003FFF +#define SDMA_PKT_COPY_T2T_DW_9_dst_y_shift 16 +#define SDMA_PKT_COPY_T2T_DW_9_DST_Y(x) (((x) & SDMA_PKT_COPY_T2T_DW_9_dst_y_mask) << SDMA_PKT_COPY_T2T_DW_9_dst_y_shift) + +/*define for DW_10 word*/ +/*define for dst_z field*/ +#define SDMA_PKT_COPY_T2T_DW_10_dst_z_offset 10 +#define SDMA_PKT_COPY_T2T_DW_10_dst_z_mask 0x000007FF +#define SDMA_PKT_COPY_T2T_DW_10_dst_z_shift 0 +#define SDMA_PKT_COPY_T2T_DW_10_DST_Z(x) (((x) & SDMA_PKT_COPY_T2T_DW_10_dst_z_mask) << SDMA_PKT_COPY_T2T_DW_10_dst_z_shift) + +/*define for dst_pitch_in_tile field*/ +#define SDMA_PKT_COPY_T2T_DW_10_dst_pitch_in_tile_offset 10 +#define SDMA_PKT_COPY_T2T_DW_10_dst_pitch_in_tile_mask 0x00000FFF +#define SDMA_PKT_COPY_T2T_DW_10_dst_pitch_in_tile_shift 16 +#define SDMA_PKT_COPY_T2T_DW_10_DST_PITCH_IN_TILE(x) (((x) & SDMA_PKT_COPY_T2T_DW_10_dst_pitch_in_tile_mask) << SDMA_PKT_COPY_T2T_DW_10_dst_pitch_in_tile_shift) + +/*define for DW_11 word*/ +/*define for dst_slice_pitch field*/ +#define SDMA_PKT_COPY_T2T_DW_11_dst_slice_pitch_offset 11 +#define SDMA_PKT_COPY_T2T_DW_11_dst_slice_pitch_mask 0x003FFFFF +#define SDMA_PKT_COPY_T2T_DW_11_dst_slice_pitch_shift 0 +#define SDMA_PKT_COPY_T2T_DW_11_DST_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_T2T_DW_11_dst_slice_pitch_mask) << SDMA_PKT_COPY_T2T_DW_11_dst_slice_pitch_shift) + +/*define for DW_12 word*/ +/*define for dst_array_mode field*/ +#define SDMA_PKT_COPY_T2T_DW_12_dst_array_mode_offset 12 +#define SDMA_PKT_COPY_T2T_DW_12_dst_array_mode_mask 0x0000000F +#define SDMA_PKT_COPY_T2T_DW_12_dst_array_mode_shift 3 +#define SDMA_PKT_COPY_T2T_DW_12_DST_ARRAY_MODE(x) (((x) & SDMA_PKT_COPY_T2T_DW_12_dst_array_mode_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_array_mode_shift) + +/*define for dst_mit_mode field*/ +#define SDMA_PKT_COPY_T2T_DW_12_dst_mit_mode_offset 12 +#define SDMA_PKT_COPY_T2T_DW_12_dst_mit_mode_mask 0x00000007 +#define SDMA_PKT_COPY_T2T_DW_12_dst_mit_mode_shift 8 +#define SDMA_PKT_COPY_T2T_DW_12_DST_MIT_MODE(x) (((x) & SDMA_PKT_COPY_T2T_DW_12_dst_mit_mode_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_mit_mode_shift) + +/*define for dst_tilesplit_size field*/ +#define SDMA_PKT_COPY_T2T_DW_12_dst_tilesplit_size_offset 12 +#define SDMA_PKT_COPY_T2T_DW_12_dst_tilesplit_size_mask 0x00000007 +#define SDMA_PKT_COPY_T2T_DW_12_dst_tilesplit_size_shift 11 +#define SDMA_PKT_COPY_T2T_DW_12_DST_TILESPLIT_SIZE(x) (((x) & SDMA_PKT_COPY_T2T_DW_12_dst_tilesplit_size_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_tilesplit_size_shift) + +/*define for dst_bank_w field*/ +#define SDMA_PKT_COPY_T2T_DW_12_dst_bank_w_offset 12 +#define SDMA_PKT_COPY_T2T_DW_12_dst_bank_w_mask 0x00000003 +#define SDMA_PKT_COPY_T2T_DW_12_dst_bank_w_shift 15 +#define SDMA_PKT_COPY_T2T_DW_12_DST_BANK_W(x) (((x) & 
SDMA_PKT_COPY_T2T_DW_12_dst_bank_w_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_bank_w_shift) + +/*define for dst_bank_h field*/ +#define SDMA_PKT_COPY_T2T_DW_12_dst_bank_h_offset 12 +#define SDMA_PKT_COPY_T2T_DW_12_dst_bank_h_mask 0x00000003 +#define SDMA_PKT_COPY_T2T_DW_12_dst_bank_h_shift 18 +#define SDMA_PKT_COPY_T2T_DW_12_DST_BANK_H(x) (((x) & SDMA_PKT_COPY_T2T_DW_12_dst_bank_h_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_bank_h_shift) + +/*define for dst_num_bank field*/ +#define SDMA_PKT_COPY_T2T_DW_12_dst_num_bank_offset 12 +#define SDMA_PKT_COPY_T2T_DW_12_dst_num_bank_mask 0x00000003 +#define SDMA_PKT_COPY_T2T_DW_12_dst_num_bank_shift 21 +#define SDMA_PKT_COPY_T2T_DW_12_DST_NUM_BANK(x) (((x) & SDMA_PKT_COPY_T2T_DW_12_dst_num_bank_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_num_bank_shift) + +/*define for dst_mat_aspt field*/ +#define SDMA_PKT_COPY_T2T_DW_12_dst_mat_aspt_offset 12 +#define SDMA_PKT_COPY_T2T_DW_12_dst_mat_aspt_mask 0x00000003 +#define SDMA_PKT_COPY_T2T_DW_12_dst_mat_aspt_shift 24 +#define SDMA_PKT_COPY_T2T_DW_12_DST_MAT_ASPT(x) (((x) & SDMA_PKT_COPY_T2T_DW_12_dst_mat_aspt_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_mat_aspt_shift) + +/*define for dst_pipe_config field*/ +#define SDMA_PKT_COPY_T2T_DW_12_dst_pipe_config_offset 12 +#define SDMA_PKT_COPY_T2T_DW_12_dst_pipe_config_mask 0x0000001F +#define SDMA_PKT_COPY_T2T_DW_12_dst_pipe_config_shift 26 +#define SDMA_PKT_COPY_T2T_DW_12_DST_PIPE_CONFIG(x) (((x) & SDMA_PKT_COPY_T2T_DW_12_dst_pipe_config_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_pipe_config_shift) + +/*define for DW_13 word*/ +/*define for rect_x field*/ +#define SDMA_PKT_COPY_T2T_DW_13_rect_x_offset 13 +#define SDMA_PKT_COPY_T2T_DW_13_rect_x_mask 0x00003FFF +#define SDMA_PKT_COPY_T2T_DW_13_rect_x_shift 0 +#define SDMA_PKT_COPY_T2T_DW_13_RECT_X(x) (((x) & SDMA_PKT_COPY_T2T_DW_13_rect_x_mask) << SDMA_PKT_COPY_T2T_DW_13_rect_x_shift) + +/*define for rect_y field*/ +#define SDMA_PKT_COPY_T2T_DW_13_rect_y_offset 13 +#define SDMA_PKT_COPY_T2T_DW_13_rect_y_mask 0x00003FFF +#define SDMA_PKT_COPY_T2T_DW_13_rect_y_shift 16 +#define SDMA_PKT_COPY_T2T_DW_13_RECT_Y(x) (((x) & SDMA_PKT_COPY_T2T_DW_13_rect_y_mask) << SDMA_PKT_COPY_T2T_DW_13_rect_y_shift) + +/*define for DW_14 word*/ +/*define for rect_z field*/ +#define SDMA_PKT_COPY_T2T_DW_14_rect_z_offset 14 +#define SDMA_PKT_COPY_T2T_DW_14_rect_z_mask 0x000007FF +#define SDMA_PKT_COPY_T2T_DW_14_rect_z_shift 0 +#define SDMA_PKT_COPY_T2T_DW_14_RECT_Z(x) (((x) & SDMA_PKT_COPY_T2T_DW_14_rect_z_mask) << SDMA_PKT_COPY_T2T_DW_14_rect_z_shift) + +/*define for dst_sw field*/ +#define SDMA_PKT_COPY_T2T_DW_14_dst_sw_offset 14 +#define SDMA_PKT_COPY_T2T_DW_14_dst_sw_mask 0x00000003 +#define SDMA_PKT_COPY_T2T_DW_14_dst_sw_shift 16 +#define SDMA_PKT_COPY_T2T_DW_14_DST_SW(x) (((x) & SDMA_PKT_COPY_T2T_DW_14_dst_sw_mask) << SDMA_PKT_COPY_T2T_DW_14_dst_sw_shift) + +/*define for src_sw field*/ +#define SDMA_PKT_COPY_T2T_DW_14_src_sw_offset 14 +#define SDMA_PKT_COPY_T2T_DW_14_src_sw_mask 0x00000003 +#define SDMA_PKT_COPY_T2T_DW_14_src_sw_shift 24 +#define SDMA_PKT_COPY_T2T_DW_14_SRC_SW(x) (((x) & SDMA_PKT_COPY_T2T_DW_14_src_sw_mask) << SDMA_PKT_COPY_T2T_DW_14_src_sw_shift) + + +/* +** Definitions for SDMA_PKT_COPY_TILED_SUBWIN packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_op_offset 0 +#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_op_mask 0x000000FF +#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_op_shift 0 +#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_OP(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_HEADER_op_mask) << 
SDMA_PKT_COPY_TILED_SUBWIN_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_sub_op_offset 0 +#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_sub_op_shift 8 +#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_HEADER_sub_op_mask) << SDMA_PKT_COPY_TILED_SUBWIN_HEADER_sub_op_shift) + +/*define for detile field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_detile_offset 0 +#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_detile_mask 0x00000001 +#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_detile_shift 31 +#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_DETILE(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_HEADER_detile_mask) << SDMA_PKT_COPY_TILED_SUBWIN_HEADER_detile_shift) + +/*define for TILED_ADDR_LO word*/ +/*define for tiled_addr_31_0 field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_LO_tiled_addr_31_0_offset 1 +#define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_LO_tiled_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_LO_tiled_addr_31_0_shift 0 +#define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_LO_TILED_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_LO_tiled_addr_31_0_mask) << SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_LO_tiled_addr_31_0_shift) + +/*define for TILED_ADDR_HI word*/ +/*define for tiled_addr_63_32 field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_HI_tiled_addr_63_32_offset 2 +#define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_HI_tiled_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_HI_tiled_addr_63_32_shift 0 +#define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_HI_TILED_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_HI_tiled_addr_63_32_mask) << SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_HI_tiled_addr_63_32_shift) + +/*define for DW_3 word*/ +/*define for tiled_x field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_x_offset 3 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_x_mask 0x00003FFF +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_x_shift 0 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_TILED_X(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_x_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_x_shift) + +/*define for tiled_y field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_y_offset 3 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_y_mask 0x00003FFF +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_y_shift 16 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_TILED_Y(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_y_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_y_shift) + +/*define for DW_4 word*/ +/*define for tiled_z field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_tiled_z_offset 4 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_tiled_z_mask 0x000007FF +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_tiled_z_shift 0 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_TILED_Z(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_4_tiled_z_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_4_tiled_z_shift) + +/*define for pitch_in_tile field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_pitch_in_tile_offset 4 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_pitch_in_tile_mask 0x00000FFF +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_pitch_in_tile_shift 16 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_PITCH_IN_TILE(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_4_pitch_in_tile_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_4_pitch_in_tile_shift) + +/*define for DW_5 word*/ +/*define for slice_pitch field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_5_slice_pitch_offset 5 +#define 
SDMA_PKT_COPY_TILED_SUBWIN_DW_5_slice_pitch_mask 0x003FFFFF +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_5_slice_pitch_shift 0 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_5_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_5_slice_pitch_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_5_slice_pitch_shift) + +/*define for DW_6 word*/ +/*define for element_size field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_element_size_offset 6 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_element_size_mask 0x00000007 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_element_size_shift 0 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_ELEMENT_SIZE(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_element_size_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_element_size_shift) + +/*define for array_mode field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_array_mode_offset 6 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_array_mode_mask 0x0000000F +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_array_mode_shift 3 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_ARRAY_MODE(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_array_mode_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_array_mode_shift) + +/*define for mit_mode field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mit_mode_offset 6 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mit_mode_mask 0x00000007 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mit_mode_shift 8 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_MIT_MODE(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mit_mode_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mit_mode_shift) + +/*define for tilesplit_size field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_tilesplit_size_offset 6 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_tilesplit_size_mask 0x00000007 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_tilesplit_size_shift 11 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_TILESPLIT_SIZE(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_tilesplit_size_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_tilesplit_size_shift) + +/*define for bank_w field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_w_offset 6 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_w_mask 0x00000003 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_w_shift 15 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_BANK_W(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_w_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_w_shift) + +/*define for bank_h field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_h_offset 6 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_h_mask 0x00000003 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_h_shift 18 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_BANK_H(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_h_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_h_shift) + +/*define for num_bank field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_num_bank_offset 6 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_num_bank_mask 0x00000003 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_num_bank_shift 21 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_NUM_BANK(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_num_bank_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_num_bank_shift) + +/*define for mat_aspt field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mat_aspt_offset 6 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mat_aspt_mask 0x00000003 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mat_aspt_shift 24 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_MAT_ASPT(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mat_aspt_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mat_aspt_shift) + +/*define for pipe_config field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_pipe_config_offset 6 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_pipe_config_mask 0x0000001F +#define 
SDMA_PKT_COPY_TILED_SUBWIN_DW_6_pipe_config_shift 26 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_PIPE_CONFIG(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_pipe_config_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_pipe_config_shift) + +/*define for LINEAR_ADDR_LO word*/ +/*define for linear_addr_31_0 field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_LO_linear_addr_31_0_offset 7 +#define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_LO_linear_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_LO_linear_addr_31_0_shift 0 +#define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_LO_LINEAR_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_LO_linear_addr_31_0_mask) << SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_LO_linear_addr_31_0_shift) + +/*define for LINEAR_ADDR_HI word*/ +/*define for linear_addr_63_32 field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_HI_linear_addr_63_32_offset 8 +#define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_HI_linear_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_HI_linear_addr_63_32_shift 0 +#define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_HI_LINEAR_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_HI_linear_addr_63_32_mask) << SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_HI_linear_addr_63_32_shift) + +/*define for DW_9 word*/ +/*define for linear_x field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_x_offset 9 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_x_mask 0x00003FFF +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_x_shift 0 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_LINEAR_X(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_x_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_x_shift) + +/*define for linear_y field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_y_offset 9 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_y_mask 0x00003FFF +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_y_shift 16 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_LINEAR_Y(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_y_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_y_shift) + +/*define for DW_10 word*/ +/*define for linear_z field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_z_offset 10 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_z_mask 0x000007FF +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_z_shift 0 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_LINEAR_Z(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_z_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_z_shift) + +/*define for linear_pitch field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_pitch_offset 10 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_pitch_mask 0x00003FFF +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_pitch_shift 16 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_LINEAR_PITCH(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_pitch_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_pitch_shift) + +/*define for DW_11 word*/ +/*define for linear_slice_pitch field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_11_linear_slice_pitch_offset 11 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_11_linear_slice_pitch_mask 0x0FFFFFFF +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_11_linear_slice_pitch_shift 0 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_11_LINEAR_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_11_linear_slice_pitch_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_11_linear_slice_pitch_shift) + +/*define for DW_12 word*/ +/*define for rect_x field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_x_offset 12 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_x_mask 0x00003FFF +#define 
SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_x_shift 0 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_12_RECT_X(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_x_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_x_shift) + +/*define for rect_y field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_y_offset 12 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_y_mask 0x00003FFF +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_y_shift 16 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_12_RECT_Y(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_y_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_y_shift) + +/*define for DW_13 word*/ +/*define for rect_z field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_rect_z_offset 13 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_rect_z_mask 0x000007FF +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_rect_z_shift 0 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_RECT_Z(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_13_rect_z_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_13_rect_z_shift) + +/*define for linear_sw field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_linear_sw_offset 13 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_linear_sw_mask 0x00000003 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_linear_sw_shift 16 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_LINEAR_SW(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_13_linear_sw_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_13_linear_sw_shift) + +/*define for tile_sw field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_tile_sw_offset 13 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_tile_sw_mask 0x00000003 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_tile_sw_shift 24 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_TILE_SW(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_13_tile_sw_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_13_tile_sw_shift) + + +/* +** Definitions for SDMA_PKT_COPY_STRUCT packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_COPY_STRUCT_HEADER_op_offset 0 +#define SDMA_PKT_COPY_STRUCT_HEADER_op_mask 0x000000FF +#define SDMA_PKT_COPY_STRUCT_HEADER_op_shift 0 +#define SDMA_PKT_COPY_STRUCT_HEADER_OP(x) (((x) & SDMA_PKT_COPY_STRUCT_HEADER_op_mask) << SDMA_PKT_COPY_STRUCT_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_COPY_STRUCT_HEADER_sub_op_offset 0 +#define SDMA_PKT_COPY_STRUCT_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_COPY_STRUCT_HEADER_sub_op_shift 8 +#define SDMA_PKT_COPY_STRUCT_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_STRUCT_HEADER_sub_op_mask) << SDMA_PKT_COPY_STRUCT_HEADER_sub_op_shift) + +/*define for detile field*/ +#define SDMA_PKT_COPY_STRUCT_HEADER_detile_offset 0 +#define SDMA_PKT_COPY_STRUCT_HEADER_detile_mask 0x00000001 +#define SDMA_PKT_COPY_STRUCT_HEADER_detile_shift 31 +#define SDMA_PKT_COPY_STRUCT_HEADER_DETILE(x) (((x) & SDMA_PKT_COPY_STRUCT_HEADER_detile_mask) << SDMA_PKT_COPY_STRUCT_HEADER_detile_shift) + +/*define for SB_ADDR_LO word*/ +/*define for sb_addr_31_0 field*/ +#define SDMA_PKT_COPY_STRUCT_SB_ADDR_LO_sb_addr_31_0_offset 1 +#define SDMA_PKT_COPY_STRUCT_SB_ADDR_LO_sb_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_STRUCT_SB_ADDR_LO_sb_addr_31_0_shift 0 +#define SDMA_PKT_COPY_STRUCT_SB_ADDR_LO_SB_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_STRUCT_SB_ADDR_LO_sb_addr_31_0_mask) << SDMA_PKT_COPY_STRUCT_SB_ADDR_LO_sb_addr_31_0_shift) + +/*define for SB_ADDR_HI word*/ +/*define for sb_addr_63_32 field*/ +#define SDMA_PKT_COPY_STRUCT_SB_ADDR_HI_sb_addr_63_32_offset 2 +#define SDMA_PKT_COPY_STRUCT_SB_ADDR_HI_sb_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_STRUCT_SB_ADDR_HI_sb_addr_63_32_shift 0 +#define 
SDMA_PKT_COPY_STRUCT_SB_ADDR_HI_SB_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_STRUCT_SB_ADDR_HI_sb_addr_63_32_mask) << SDMA_PKT_COPY_STRUCT_SB_ADDR_HI_sb_addr_63_32_shift) + +/*define for START_INDEX word*/ +/*define for start_index field*/ +#define SDMA_PKT_COPY_STRUCT_START_INDEX_start_index_offset 3 +#define SDMA_PKT_COPY_STRUCT_START_INDEX_start_index_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_STRUCT_START_INDEX_start_index_shift 0 +#define SDMA_PKT_COPY_STRUCT_START_INDEX_START_INDEX(x) (((x) & SDMA_PKT_COPY_STRUCT_START_INDEX_start_index_mask) << SDMA_PKT_COPY_STRUCT_START_INDEX_start_index_shift) + +/*define for COUNT word*/ +/*define for count field*/ +#define SDMA_PKT_COPY_STRUCT_COUNT_count_offset 4 +#define SDMA_PKT_COPY_STRUCT_COUNT_count_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_STRUCT_COUNT_count_shift 0 +#define SDMA_PKT_COPY_STRUCT_COUNT_COUNT(x) (((x) & SDMA_PKT_COPY_STRUCT_COUNT_count_mask) << SDMA_PKT_COPY_STRUCT_COUNT_count_shift) + +/*define for DW_5 word*/ +/*define for stride field*/ +#define SDMA_PKT_COPY_STRUCT_DW_5_stride_offset 5 +#define SDMA_PKT_COPY_STRUCT_DW_5_stride_mask 0x000007FF +#define SDMA_PKT_COPY_STRUCT_DW_5_stride_shift 0 +#define SDMA_PKT_COPY_STRUCT_DW_5_STRIDE(x) (((x) & SDMA_PKT_COPY_STRUCT_DW_5_stride_mask) << SDMA_PKT_COPY_STRUCT_DW_5_stride_shift) + +/*define for struct_sw field*/ +#define SDMA_PKT_COPY_STRUCT_DW_5_struct_sw_offset 5 +#define SDMA_PKT_COPY_STRUCT_DW_5_struct_sw_mask 0x00000003 +#define SDMA_PKT_COPY_STRUCT_DW_5_struct_sw_shift 16 +#define SDMA_PKT_COPY_STRUCT_DW_5_STRUCT_SW(x) (((x) & SDMA_PKT_COPY_STRUCT_DW_5_struct_sw_mask) << SDMA_PKT_COPY_STRUCT_DW_5_struct_sw_shift) + +/*define for struct_ha field*/ +#define SDMA_PKT_COPY_STRUCT_DW_5_struct_ha_offset 5 +#define SDMA_PKT_COPY_STRUCT_DW_5_struct_ha_mask 0x00000001 +#define SDMA_PKT_COPY_STRUCT_DW_5_struct_ha_shift 22 +#define SDMA_PKT_COPY_STRUCT_DW_5_STRUCT_HA(x) (((x) & SDMA_PKT_COPY_STRUCT_DW_5_struct_ha_mask) << SDMA_PKT_COPY_STRUCT_DW_5_struct_ha_shift) + +/*define for linear_sw field*/ +#define SDMA_PKT_COPY_STRUCT_DW_5_linear_sw_offset 5 +#define SDMA_PKT_COPY_STRUCT_DW_5_linear_sw_mask 0x00000003 +#define SDMA_PKT_COPY_STRUCT_DW_5_linear_sw_shift 24 +#define SDMA_PKT_COPY_STRUCT_DW_5_LINEAR_SW(x) (((x) & SDMA_PKT_COPY_STRUCT_DW_5_linear_sw_mask) << SDMA_PKT_COPY_STRUCT_DW_5_linear_sw_shift) + +/*define for linear_ha field*/ +#define SDMA_PKT_COPY_STRUCT_DW_5_linear_ha_offset 5 +#define SDMA_PKT_COPY_STRUCT_DW_5_linear_ha_mask 0x00000001 +#define SDMA_PKT_COPY_STRUCT_DW_5_linear_ha_shift 30 +#define SDMA_PKT_COPY_STRUCT_DW_5_LINEAR_HA(x) (((x) & SDMA_PKT_COPY_STRUCT_DW_5_linear_ha_mask) << SDMA_PKT_COPY_STRUCT_DW_5_linear_ha_shift) + +/*define for LINEAR_ADDR_LO word*/ +/*define for linear_addr_31_0 field*/ +#define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_LO_linear_addr_31_0_offset 6 +#define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_LO_linear_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_LO_linear_addr_31_0_shift 0 +#define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_LO_LINEAR_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_LO_linear_addr_31_0_mask) << SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_LO_linear_addr_31_0_shift) + +/*define for LINEAR_ADDR_HI word*/ +/*define for linear_addr_63_32 field*/ +#define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_HI_linear_addr_63_32_offset 7 +#define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_HI_linear_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_HI_linear_addr_63_32_shift 0 +#define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_HI_LINEAR_ADDR_63_32(x) (((x) & 
SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_HI_linear_addr_63_32_mask) << SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_HI_linear_addr_63_32_shift) + + +/* +** Definitions for SDMA_PKT_WRITE_UNTILED packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_WRITE_UNTILED_HEADER_op_offset 0 +#define SDMA_PKT_WRITE_UNTILED_HEADER_op_mask 0x000000FF +#define SDMA_PKT_WRITE_UNTILED_HEADER_op_shift 0 +#define SDMA_PKT_WRITE_UNTILED_HEADER_OP(x) (((x) & SDMA_PKT_WRITE_UNTILED_HEADER_op_mask) << SDMA_PKT_WRITE_UNTILED_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_WRITE_UNTILED_HEADER_sub_op_offset 0 +#define SDMA_PKT_WRITE_UNTILED_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_WRITE_UNTILED_HEADER_sub_op_shift 8 +#define SDMA_PKT_WRITE_UNTILED_HEADER_SUB_OP(x) (((x) & SDMA_PKT_WRITE_UNTILED_HEADER_sub_op_mask) << SDMA_PKT_WRITE_UNTILED_HEADER_sub_op_shift) + +/*define for DST_ADDR_LO word*/ +/*define for dst_addr_31_0 field*/ +#define SDMA_PKT_WRITE_UNTILED_DST_ADDR_LO_dst_addr_31_0_offset 1 +#define SDMA_PKT_WRITE_UNTILED_DST_ADDR_LO_dst_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_WRITE_UNTILED_DST_ADDR_LO_dst_addr_31_0_shift 0 +#define SDMA_PKT_WRITE_UNTILED_DST_ADDR_LO_DST_ADDR_31_0(x) (((x) & SDMA_PKT_WRITE_UNTILED_DST_ADDR_LO_dst_addr_31_0_mask) << SDMA_PKT_WRITE_UNTILED_DST_ADDR_LO_dst_addr_31_0_shift) + +/*define for DST_ADDR_HI word*/ +/*define for dst_addr_63_32 field*/ +#define SDMA_PKT_WRITE_UNTILED_DST_ADDR_HI_dst_addr_63_32_offset 2 +#define SDMA_PKT_WRITE_UNTILED_DST_ADDR_HI_dst_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_WRITE_UNTILED_DST_ADDR_HI_dst_addr_63_32_shift 0 +#define SDMA_PKT_WRITE_UNTILED_DST_ADDR_HI_DST_ADDR_63_32(x) (((x) & SDMA_PKT_WRITE_UNTILED_DST_ADDR_HI_dst_addr_63_32_mask) << SDMA_PKT_WRITE_UNTILED_DST_ADDR_HI_dst_addr_63_32_shift) + +/*define for DW_3 word*/ +/*define for count field*/ +#define SDMA_PKT_WRITE_UNTILED_DW_3_count_offset 3 +#define SDMA_PKT_WRITE_UNTILED_DW_3_count_mask 0x003FFFFF +#define SDMA_PKT_WRITE_UNTILED_DW_3_count_shift 0 +#define SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(x) (((x) & SDMA_PKT_WRITE_UNTILED_DW_3_count_mask) << SDMA_PKT_WRITE_UNTILED_DW_3_count_shift) + +/*define for sw field*/ +#define SDMA_PKT_WRITE_UNTILED_DW_3_sw_offset 3 +#define SDMA_PKT_WRITE_UNTILED_DW_3_sw_mask 0x00000003 +#define SDMA_PKT_WRITE_UNTILED_DW_3_sw_shift 24 +#define SDMA_PKT_WRITE_UNTILED_DW_3_SW(x) (((x) & SDMA_PKT_WRITE_UNTILED_DW_3_sw_mask) << SDMA_PKT_WRITE_UNTILED_DW_3_sw_shift) + +/*define for DATA0 word*/ +/*define for data0 field*/ +#define SDMA_PKT_WRITE_UNTILED_DATA0_data0_offset 4 +#define SDMA_PKT_WRITE_UNTILED_DATA0_data0_mask 0xFFFFFFFF +#define SDMA_PKT_WRITE_UNTILED_DATA0_data0_shift 0 +#define SDMA_PKT_WRITE_UNTILED_DATA0_DATA0(x) (((x) & SDMA_PKT_WRITE_UNTILED_DATA0_data0_mask) << SDMA_PKT_WRITE_UNTILED_DATA0_data0_shift) + + +/* +** Definitions for SDMA_PKT_WRITE_TILED packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_WRITE_TILED_HEADER_op_offset 0 +#define SDMA_PKT_WRITE_TILED_HEADER_op_mask 0x000000FF +#define SDMA_PKT_WRITE_TILED_HEADER_op_shift 0 +#define SDMA_PKT_WRITE_TILED_HEADER_OP(x) (((x) & SDMA_PKT_WRITE_TILED_HEADER_op_mask) << SDMA_PKT_WRITE_TILED_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_WRITE_TILED_HEADER_sub_op_offset 0 +#define SDMA_PKT_WRITE_TILED_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_WRITE_TILED_HEADER_sub_op_shift 8 +#define SDMA_PKT_WRITE_TILED_HEADER_SUB_OP(x) (((x) & SDMA_PKT_WRITE_TILED_HEADER_sub_op_mask) << 
SDMA_PKT_WRITE_TILED_HEADER_sub_op_shift) + +/*define for DST_ADDR_LO word*/ +/*define for dst_addr_31_0 field*/ +#define SDMA_PKT_WRITE_TILED_DST_ADDR_LO_dst_addr_31_0_offset 1 +#define SDMA_PKT_WRITE_TILED_DST_ADDR_LO_dst_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_WRITE_TILED_DST_ADDR_LO_dst_addr_31_0_shift 0 +#define SDMA_PKT_WRITE_TILED_DST_ADDR_LO_DST_ADDR_31_0(x) (((x) & SDMA_PKT_WRITE_TILED_DST_ADDR_LO_dst_addr_31_0_mask) << SDMA_PKT_WRITE_TILED_DST_ADDR_LO_dst_addr_31_0_shift) + +/*define for DST_ADDR_HI word*/ +/*define for dst_addr_63_32 field*/ +#define SDMA_PKT_WRITE_TILED_DST_ADDR_HI_dst_addr_63_32_offset 2 +#define SDMA_PKT_WRITE_TILED_DST_ADDR_HI_dst_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_WRITE_TILED_DST_ADDR_HI_dst_addr_63_32_shift 0 +#define SDMA_PKT_WRITE_TILED_DST_ADDR_HI_DST_ADDR_63_32(x) (((x) & SDMA_PKT_WRITE_TILED_DST_ADDR_HI_dst_addr_63_32_mask) << SDMA_PKT_WRITE_TILED_DST_ADDR_HI_dst_addr_63_32_shift) + +/*define for DW_3 word*/ +/*define for pitch_in_tile field*/ +#define SDMA_PKT_WRITE_TILED_DW_3_pitch_in_tile_offset 3 +#define SDMA_PKT_WRITE_TILED_DW_3_pitch_in_tile_mask 0x000007FF +#define SDMA_PKT_WRITE_TILED_DW_3_pitch_in_tile_shift 0 +#define SDMA_PKT_WRITE_TILED_DW_3_PITCH_IN_TILE(x) (((x) & SDMA_PKT_WRITE_TILED_DW_3_pitch_in_tile_mask) << SDMA_PKT_WRITE_TILED_DW_3_pitch_in_tile_shift) + +/*define for height field*/ +#define SDMA_PKT_WRITE_TILED_DW_3_height_offset 3 +#define SDMA_PKT_WRITE_TILED_DW_3_height_mask 0x00003FFF +#define SDMA_PKT_WRITE_TILED_DW_3_height_shift 16 +#define SDMA_PKT_WRITE_TILED_DW_3_HEIGHT(x) (((x) & SDMA_PKT_WRITE_TILED_DW_3_height_mask) << SDMA_PKT_WRITE_TILED_DW_3_height_shift) + +/*define for DW_4 word*/ +/*define for slice_pitch field*/ +#define SDMA_PKT_WRITE_TILED_DW_4_slice_pitch_offset 4 +#define SDMA_PKT_WRITE_TILED_DW_4_slice_pitch_mask 0x003FFFFF +#define SDMA_PKT_WRITE_TILED_DW_4_slice_pitch_shift 0 +#define SDMA_PKT_WRITE_TILED_DW_4_SLICE_PITCH(x) (((x) & SDMA_PKT_WRITE_TILED_DW_4_slice_pitch_mask) << SDMA_PKT_WRITE_TILED_DW_4_slice_pitch_shift) + +/*define for DW_5 word*/ +/*define for element_size field*/ +#define SDMA_PKT_WRITE_TILED_DW_5_element_size_offset 5 +#define SDMA_PKT_WRITE_TILED_DW_5_element_size_mask 0x00000007 +#define SDMA_PKT_WRITE_TILED_DW_5_element_size_shift 0 +#define SDMA_PKT_WRITE_TILED_DW_5_ELEMENT_SIZE(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_element_size_mask) << SDMA_PKT_WRITE_TILED_DW_5_element_size_shift) + +/*define for array_mode field*/ +#define SDMA_PKT_WRITE_TILED_DW_5_array_mode_offset 5 +#define SDMA_PKT_WRITE_TILED_DW_5_array_mode_mask 0x0000000F +#define SDMA_PKT_WRITE_TILED_DW_5_array_mode_shift 3 +#define SDMA_PKT_WRITE_TILED_DW_5_ARRAY_MODE(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_array_mode_mask) << SDMA_PKT_WRITE_TILED_DW_5_array_mode_shift) + +/*define for mit_mode field*/ +#define SDMA_PKT_WRITE_TILED_DW_5_mit_mode_offset 5 +#define SDMA_PKT_WRITE_TILED_DW_5_mit_mode_mask 0x00000007 +#define SDMA_PKT_WRITE_TILED_DW_5_mit_mode_shift 8 +#define SDMA_PKT_WRITE_TILED_DW_5_MIT_MODE(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_mit_mode_mask) << SDMA_PKT_WRITE_TILED_DW_5_mit_mode_shift) + +/*define for tilesplit_size field*/ +#define SDMA_PKT_WRITE_TILED_DW_5_tilesplit_size_offset 5 +#define SDMA_PKT_WRITE_TILED_DW_5_tilesplit_size_mask 0x00000007 +#define SDMA_PKT_WRITE_TILED_DW_5_tilesplit_size_shift 11 +#define SDMA_PKT_WRITE_TILED_DW_5_TILESPLIT_SIZE(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_tilesplit_size_mask) << SDMA_PKT_WRITE_TILED_DW_5_tilesplit_size_shift) + +/*define for bank_w 
field*/ +#define SDMA_PKT_WRITE_TILED_DW_5_bank_w_offset 5 +#define SDMA_PKT_WRITE_TILED_DW_5_bank_w_mask 0x00000003 +#define SDMA_PKT_WRITE_TILED_DW_5_bank_w_shift 15 +#define SDMA_PKT_WRITE_TILED_DW_5_BANK_W(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_bank_w_mask) << SDMA_PKT_WRITE_TILED_DW_5_bank_w_shift) + +/*define for bank_h field*/ +#define SDMA_PKT_WRITE_TILED_DW_5_bank_h_offset 5 +#define SDMA_PKT_WRITE_TILED_DW_5_bank_h_mask 0x00000003 +#define SDMA_PKT_WRITE_TILED_DW_5_bank_h_shift 18 +#define SDMA_PKT_WRITE_TILED_DW_5_BANK_H(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_bank_h_mask) << SDMA_PKT_WRITE_TILED_DW_5_bank_h_shift) + +/*define for num_bank field*/ +#define SDMA_PKT_WRITE_TILED_DW_5_num_bank_offset 5 +#define SDMA_PKT_WRITE_TILED_DW_5_num_bank_mask 0x00000003 +#define SDMA_PKT_WRITE_TILED_DW_5_num_bank_shift 21 +#define SDMA_PKT_WRITE_TILED_DW_5_NUM_BANK(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_num_bank_mask) << SDMA_PKT_WRITE_TILED_DW_5_num_bank_shift) + +/*define for mat_aspt field*/ +#define SDMA_PKT_WRITE_TILED_DW_5_mat_aspt_offset 5 +#define SDMA_PKT_WRITE_TILED_DW_5_mat_aspt_mask 0x00000003 +#define SDMA_PKT_WRITE_TILED_DW_5_mat_aspt_shift 24 +#define SDMA_PKT_WRITE_TILED_DW_5_MAT_ASPT(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_mat_aspt_mask) << SDMA_PKT_WRITE_TILED_DW_5_mat_aspt_shift) + +/*define for pipe_config field*/ +#define SDMA_PKT_WRITE_TILED_DW_5_pipe_config_offset 5 +#define SDMA_PKT_WRITE_TILED_DW_5_pipe_config_mask 0x0000001F +#define SDMA_PKT_WRITE_TILED_DW_5_pipe_config_shift 26 +#define SDMA_PKT_WRITE_TILED_DW_5_PIPE_CONFIG(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_pipe_config_mask) << SDMA_PKT_WRITE_TILED_DW_5_pipe_config_shift) + +/*define for DW_6 word*/ +/*define for x field*/ +#define SDMA_PKT_WRITE_TILED_DW_6_x_offset 6 +#define SDMA_PKT_WRITE_TILED_DW_6_x_mask 0x00003FFF +#define SDMA_PKT_WRITE_TILED_DW_6_x_shift 0 +#define SDMA_PKT_WRITE_TILED_DW_6_X(x) (((x) & SDMA_PKT_WRITE_TILED_DW_6_x_mask) << SDMA_PKT_WRITE_TILED_DW_6_x_shift) + +/*define for y field*/ +#define SDMA_PKT_WRITE_TILED_DW_6_y_offset 6 +#define SDMA_PKT_WRITE_TILED_DW_6_y_mask 0x00003FFF +#define SDMA_PKT_WRITE_TILED_DW_6_y_shift 16 +#define SDMA_PKT_WRITE_TILED_DW_6_Y(x) (((x) & SDMA_PKT_WRITE_TILED_DW_6_y_mask) << SDMA_PKT_WRITE_TILED_DW_6_y_shift) + +/*define for DW_7 word*/ +/*define for z field*/ +#define SDMA_PKT_WRITE_TILED_DW_7_z_offset 7 +#define SDMA_PKT_WRITE_TILED_DW_7_z_mask 0x00000FFF +#define SDMA_PKT_WRITE_TILED_DW_7_z_shift 0 +#define SDMA_PKT_WRITE_TILED_DW_7_Z(x) (((x) & SDMA_PKT_WRITE_TILED_DW_7_z_mask) << SDMA_PKT_WRITE_TILED_DW_7_z_shift) + +/*define for sw field*/ +#define SDMA_PKT_WRITE_TILED_DW_7_sw_offset 7 +#define SDMA_PKT_WRITE_TILED_DW_7_sw_mask 0x00000003 +#define SDMA_PKT_WRITE_TILED_DW_7_sw_shift 24 +#define SDMA_PKT_WRITE_TILED_DW_7_SW(x) (((x) & SDMA_PKT_WRITE_TILED_DW_7_sw_mask) << SDMA_PKT_WRITE_TILED_DW_7_sw_shift) + +/*define for COUNT word*/ +/*define for count field*/ +#define SDMA_PKT_WRITE_TILED_COUNT_count_offset 8 +#define SDMA_PKT_WRITE_TILED_COUNT_count_mask 0x003FFFFF +#define SDMA_PKT_WRITE_TILED_COUNT_count_shift 0 +#define SDMA_PKT_WRITE_TILED_COUNT_COUNT(x) (((x) & SDMA_PKT_WRITE_TILED_COUNT_count_mask) << SDMA_PKT_WRITE_TILED_COUNT_count_shift) + +/*define for DATA0 word*/ +/*define for data0 field*/ +#define SDMA_PKT_WRITE_TILED_DATA0_data0_offset 9 +#define SDMA_PKT_WRITE_TILED_DATA0_data0_mask 0xFFFFFFFF +#define SDMA_PKT_WRITE_TILED_DATA0_data0_shift 0 +#define SDMA_PKT_WRITE_TILED_DATA0_DATA0(x) (((x) & SDMA_PKT_WRITE_TILED_DATA0_data0_mask) << 
SDMA_PKT_WRITE_TILED_DATA0_data0_shift) + + +/* +** Definitions for SDMA_PKT_WRITE_INCR packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_WRITE_INCR_HEADER_op_offset 0 +#define SDMA_PKT_WRITE_INCR_HEADER_op_mask 0x000000FF +#define SDMA_PKT_WRITE_INCR_HEADER_op_shift 0 +#define SDMA_PKT_WRITE_INCR_HEADER_OP(x) (((x) & SDMA_PKT_WRITE_INCR_HEADER_op_mask) << SDMA_PKT_WRITE_INCR_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_WRITE_INCR_HEADER_sub_op_offset 0 +#define SDMA_PKT_WRITE_INCR_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_WRITE_INCR_HEADER_sub_op_shift 8 +#define SDMA_PKT_WRITE_INCR_HEADER_SUB_OP(x) (((x) & SDMA_PKT_WRITE_INCR_HEADER_sub_op_mask) << SDMA_PKT_WRITE_INCR_HEADER_sub_op_shift) + +/*define for DST_ADDR_LO word*/ +/*define for dst_addr_31_0 field*/ +#define SDMA_PKT_WRITE_INCR_DST_ADDR_LO_dst_addr_31_0_offset 1 +#define SDMA_PKT_WRITE_INCR_DST_ADDR_LO_dst_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_WRITE_INCR_DST_ADDR_LO_dst_addr_31_0_shift 0 +#define SDMA_PKT_WRITE_INCR_DST_ADDR_LO_DST_ADDR_31_0(x) (((x) & SDMA_PKT_WRITE_INCR_DST_ADDR_LO_dst_addr_31_0_mask) << SDMA_PKT_WRITE_INCR_DST_ADDR_LO_dst_addr_31_0_shift) + +/*define for DST_ADDR_HI word*/ +/*define for dst_addr_63_32 field*/ +#define SDMA_PKT_WRITE_INCR_DST_ADDR_HI_dst_addr_63_32_offset 2 +#define SDMA_PKT_WRITE_INCR_DST_ADDR_HI_dst_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_WRITE_INCR_DST_ADDR_HI_dst_addr_63_32_shift 0 +#define SDMA_PKT_WRITE_INCR_DST_ADDR_HI_DST_ADDR_63_32(x) (((x) & SDMA_PKT_WRITE_INCR_DST_ADDR_HI_dst_addr_63_32_mask) << SDMA_PKT_WRITE_INCR_DST_ADDR_HI_dst_addr_63_32_shift) + +/*define for MASK_DW0 word*/ +/*define for mask_dw0 field*/ +#define SDMA_PKT_WRITE_INCR_MASK_DW0_mask_dw0_offset 3 +#define SDMA_PKT_WRITE_INCR_MASK_DW0_mask_dw0_mask 0xFFFFFFFF +#define SDMA_PKT_WRITE_INCR_MASK_DW0_mask_dw0_shift 0 +#define SDMA_PKT_WRITE_INCR_MASK_DW0_MASK_DW0(x) (((x) & SDMA_PKT_WRITE_INCR_MASK_DW0_mask_dw0_mask) << SDMA_PKT_WRITE_INCR_MASK_DW0_mask_dw0_shift) + +/*define for MASK_DW1 word*/ +/*define for mask_dw1 field*/ +#define SDMA_PKT_WRITE_INCR_MASK_DW1_mask_dw1_offset 4 +#define SDMA_PKT_WRITE_INCR_MASK_DW1_mask_dw1_mask 0xFFFFFFFF +#define SDMA_PKT_WRITE_INCR_MASK_DW1_mask_dw1_shift 0 +#define SDMA_PKT_WRITE_INCR_MASK_DW1_MASK_DW1(x) (((x) & SDMA_PKT_WRITE_INCR_MASK_DW1_mask_dw1_mask) << SDMA_PKT_WRITE_INCR_MASK_DW1_mask_dw1_shift) + +/*define for INIT_DW0 word*/ +/*define for init_dw0 field*/ +#define SDMA_PKT_WRITE_INCR_INIT_DW0_init_dw0_offset 5 +#define SDMA_PKT_WRITE_INCR_INIT_DW0_init_dw0_mask 0xFFFFFFFF +#define SDMA_PKT_WRITE_INCR_INIT_DW0_init_dw0_shift 0 +#define SDMA_PKT_WRITE_INCR_INIT_DW0_INIT_DW0(x) (((x) & SDMA_PKT_WRITE_INCR_INIT_DW0_init_dw0_mask) << SDMA_PKT_WRITE_INCR_INIT_DW0_init_dw0_shift) + +/*define for INIT_DW1 word*/ +/*define for init_dw1 field*/ +#define SDMA_PKT_WRITE_INCR_INIT_DW1_init_dw1_offset 6 +#define SDMA_PKT_WRITE_INCR_INIT_DW1_init_dw1_mask 0xFFFFFFFF +#define SDMA_PKT_WRITE_INCR_INIT_DW1_init_dw1_shift 0 +#define SDMA_PKT_WRITE_INCR_INIT_DW1_INIT_DW1(x) (((x) & SDMA_PKT_WRITE_INCR_INIT_DW1_init_dw1_mask) << SDMA_PKT_WRITE_INCR_INIT_DW1_init_dw1_shift) + +/*define for INCR_DW0 word*/ +/*define for incr_dw0 field*/ +#define SDMA_PKT_WRITE_INCR_INCR_DW0_incr_dw0_offset 7 +#define SDMA_PKT_WRITE_INCR_INCR_DW0_incr_dw0_mask 0xFFFFFFFF +#define SDMA_PKT_WRITE_INCR_INCR_DW0_incr_dw0_shift 0 +#define SDMA_PKT_WRITE_INCR_INCR_DW0_INCR_DW0(x) (((x) & SDMA_PKT_WRITE_INCR_INCR_DW0_incr_dw0_mask) << 
SDMA_PKT_WRITE_INCR_INCR_DW0_incr_dw0_shift) + +/*define for INCR_DW1 word*/ +/*define for incr_dw1 field*/ +#define SDMA_PKT_WRITE_INCR_INCR_DW1_incr_dw1_offset 8 +#define SDMA_PKT_WRITE_INCR_INCR_DW1_incr_dw1_mask 0xFFFFFFFF +#define SDMA_PKT_WRITE_INCR_INCR_DW1_incr_dw1_shift 0 +#define SDMA_PKT_WRITE_INCR_INCR_DW1_INCR_DW1(x) (((x) & SDMA_PKT_WRITE_INCR_INCR_DW1_incr_dw1_mask) << SDMA_PKT_WRITE_INCR_INCR_DW1_incr_dw1_shift) + +/*define for COUNT word*/ +/*define for count field*/ +#define SDMA_PKT_WRITE_INCR_COUNT_count_offset 9 +#define SDMA_PKT_WRITE_INCR_COUNT_count_mask 0x0007FFFF +#define SDMA_PKT_WRITE_INCR_COUNT_count_shift 0 +#define SDMA_PKT_WRITE_INCR_COUNT_COUNT(x) (((x) & SDMA_PKT_WRITE_INCR_COUNT_count_mask) << SDMA_PKT_WRITE_INCR_COUNT_count_shift) + + +/* +** Definitions for SDMA_PKT_INDIRECT packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_INDIRECT_HEADER_op_offset 0 +#define SDMA_PKT_INDIRECT_HEADER_op_mask 0x000000FF +#define SDMA_PKT_INDIRECT_HEADER_op_shift 0 +#define SDMA_PKT_INDIRECT_HEADER_OP(x) (((x) & SDMA_PKT_INDIRECT_HEADER_op_mask) << SDMA_PKT_INDIRECT_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_INDIRECT_HEADER_sub_op_offset 0 +#define SDMA_PKT_INDIRECT_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_INDIRECT_HEADER_sub_op_shift 8 +#define SDMA_PKT_INDIRECT_HEADER_SUB_OP(x) (((x) & SDMA_PKT_INDIRECT_HEADER_sub_op_mask) << SDMA_PKT_INDIRECT_HEADER_sub_op_shift) + +/*define for vmid field*/ +#define SDMA_PKT_INDIRECT_HEADER_vmid_offset 0 +#define SDMA_PKT_INDIRECT_HEADER_vmid_mask 0x0000000F +#define SDMA_PKT_INDIRECT_HEADER_vmid_shift 16 +#define SDMA_PKT_INDIRECT_HEADER_VMID(x) (((x) & SDMA_PKT_INDIRECT_HEADER_vmid_mask) << SDMA_PKT_INDIRECT_HEADER_vmid_shift) + +/*define for BASE_LO word*/ +/*define for ib_base_31_0 field*/ +#define SDMA_PKT_INDIRECT_BASE_LO_ib_base_31_0_offset 1 +#define SDMA_PKT_INDIRECT_BASE_LO_ib_base_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_INDIRECT_BASE_LO_ib_base_31_0_shift 0 +#define SDMA_PKT_INDIRECT_BASE_LO_IB_BASE_31_0(x) (((x) & SDMA_PKT_INDIRECT_BASE_LO_ib_base_31_0_mask) << SDMA_PKT_INDIRECT_BASE_LO_ib_base_31_0_shift) + +/*define for BASE_HI word*/ +/*define for ib_base_63_32 field*/ +#define SDMA_PKT_INDIRECT_BASE_HI_ib_base_63_32_offset 2 +#define SDMA_PKT_INDIRECT_BASE_HI_ib_base_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_INDIRECT_BASE_HI_ib_base_63_32_shift 0 +#define SDMA_PKT_INDIRECT_BASE_HI_IB_BASE_63_32(x) (((x) & SDMA_PKT_INDIRECT_BASE_HI_ib_base_63_32_mask) << SDMA_PKT_INDIRECT_BASE_HI_ib_base_63_32_shift) + +/*define for IB_SIZE word*/ +/*define for ib_size field*/ +#define SDMA_PKT_INDIRECT_IB_SIZE_ib_size_offset 3 +#define SDMA_PKT_INDIRECT_IB_SIZE_ib_size_mask 0x000FFFFF +#define SDMA_PKT_INDIRECT_IB_SIZE_ib_size_shift 0 +#define SDMA_PKT_INDIRECT_IB_SIZE_IB_SIZE(x) (((x) & SDMA_PKT_INDIRECT_IB_SIZE_ib_size_mask) << SDMA_PKT_INDIRECT_IB_SIZE_ib_size_shift) + +/*define for CSA_ADDR_LO word*/ +/*define for csa_addr_31_0 field*/ +#define SDMA_PKT_INDIRECT_CSA_ADDR_LO_csa_addr_31_0_offset 4 +#define SDMA_PKT_INDIRECT_CSA_ADDR_LO_csa_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_INDIRECT_CSA_ADDR_LO_csa_addr_31_0_shift 0 +#define SDMA_PKT_INDIRECT_CSA_ADDR_LO_CSA_ADDR_31_0(x) (((x) & SDMA_PKT_INDIRECT_CSA_ADDR_LO_csa_addr_31_0_mask) << SDMA_PKT_INDIRECT_CSA_ADDR_LO_csa_addr_31_0_shift) + +/*define for CSA_ADDR_HI word*/ +/*define for csa_addr_63_32 field*/ +#define SDMA_PKT_INDIRECT_CSA_ADDR_HI_csa_addr_63_32_offset 5 +#define 
SDMA_PKT_INDIRECT_CSA_ADDR_HI_csa_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_INDIRECT_CSA_ADDR_HI_csa_addr_63_32_shift 0 +#define SDMA_PKT_INDIRECT_CSA_ADDR_HI_CSA_ADDR_63_32(x) (((x) & SDMA_PKT_INDIRECT_CSA_ADDR_HI_csa_addr_63_32_mask) << SDMA_PKT_INDIRECT_CSA_ADDR_HI_csa_addr_63_32_shift) + + +/* +** Definitions for SDMA_PKT_SEMAPHORE packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_SEMAPHORE_HEADER_op_offset 0 +#define SDMA_PKT_SEMAPHORE_HEADER_op_mask 0x000000FF +#define SDMA_PKT_SEMAPHORE_HEADER_op_shift 0 +#define SDMA_PKT_SEMAPHORE_HEADER_OP(x) (((x) & SDMA_PKT_SEMAPHORE_HEADER_op_mask) << SDMA_PKT_SEMAPHORE_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_SEMAPHORE_HEADER_sub_op_offset 0 +#define SDMA_PKT_SEMAPHORE_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_SEMAPHORE_HEADER_sub_op_shift 8 +#define SDMA_PKT_SEMAPHORE_HEADER_SUB_OP(x) (((x) & SDMA_PKT_SEMAPHORE_HEADER_sub_op_mask) << SDMA_PKT_SEMAPHORE_HEADER_sub_op_shift) + +/*define for write_one field*/ +#define SDMA_PKT_SEMAPHORE_HEADER_write_one_offset 0 +#define SDMA_PKT_SEMAPHORE_HEADER_write_one_mask 0x00000001 +#define SDMA_PKT_SEMAPHORE_HEADER_write_one_shift 29 +#define SDMA_PKT_SEMAPHORE_HEADER_WRITE_ONE(x) (((x) & SDMA_PKT_SEMAPHORE_HEADER_write_one_mask) << SDMA_PKT_SEMAPHORE_HEADER_write_one_shift) + +/*define for signal field*/ +#define SDMA_PKT_SEMAPHORE_HEADER_signal_offset 0 +#define SDMA_PKT_SEMAPHORE_HEADER_signal_mask 0x00000001 +#define SDMA_PKT_SEMAPHORE_HEADER_signal_shift 30 +#define SDMA_PKT_SEMAPHORE_HEADER_SIGNAL(x) (((x) & SDMA_PKT_SEMAPHORE_HEADER_signal_mask) << SDMA_PKT_SEMAPHORE_HEADER_signal_shift) + +/*define for mailbox field*/ +#define SDMA_PKT_SEMAPHORE_HEADER_mailbox_offset 0 +#define SDMA_PKT_SEMAPHORE_HEADER_mailbox_mask 0x00000001 +#define SDMA_PKT_SEMAPHORE_HEADER_mailbox_shift 31 +#define SDMA_PKT_SEMAPHORE_HEADER_MAILBOX(x) (((x) & SDMA_PKT_SEMAPHORE_HEADER_mailbox_mask) << SDMA_PKT_SEMAPHORE_HEADER_mailbox_shift) + +/*define for ADDR_LO word*/ +/*define for addr_31_0 field*/ +#define SDMA_PKT_SEMAPHORE_ADDR_LO_addr_31_0_offset 1 +#define SDMA_PKT_SEMAPHORE_ADDR_LO_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_SEMAPHORE_ADDR_LO_addr_31_0_shift 0 +#define SDMA_PKT_SEMAPHORE_ADDR_LO_ADDR_31_0(x) (((x) & SDMA_PKT_SEMAPHORE_ADDR_LO_addr_31_0_mask) << SDMA_PKT_SEMAPHORE_ADDR_LO_addr_31_0_shift) + +/*define for ADDR_HI word*/ +/*define for addr_63_32 field*/ +#define SDMA_PKT_SEMAPHORE_ADDR_HI_addr_63_32_offset 2 +#define SDMA_PKT_SEMAPHORE_ADDR_HI_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_SEMAPHORE_ADDR_HI_addr_63_32_shift 0 +#define SDMA_PKT_SEMAPHORE_ADDR_HI_ADDR_63_32(x) (((x) & SDMA_PKT_SEMAPHORE_ADDR_HI_addr_63_32_mask) << SDMA_PKT_SEMAPHORE_ADDR_HI_addr_63_32_shift) + + +/* +** Definitions for SDMA_PKT_FENCE packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_FENCE_HEADER_op_offset 0 +#define SDMA_PKT_FENCE_HEADER_op_mask 0x000000FF +#define SDMA_PKT_FENCE_HEADER_op_shift 0 +#define SDMA_PKT_FENCE_HEADER_OP(x) (((x) & SDMA_PKT_FENCE_HEADER_op_mask) << SDMA_PKT_FENCE_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_FENCE_HEADER_sub_op_offset 0 +#define SDMA_PKT_FENCE_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_FENCE_HEADER_sub_op_shift 8 +#define SDMA_PKT_FENCE_HEADER_SUB_OP(x) (((x) & SDMA_PKT_FENCE_HEADER_sub_op_mask) << SDMA_PKT_FENCE_HEADER_sub_op_shift) + +/*define for ADDR_LO word*/ +/*define for addr_31_0 field*/ +#define SDMA_PKT_FENCE_ADDR_LO_addr_31_0_offset 1 
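/*
 * Illustrative sketch, not part of this patch: the generated
 * _offset/_mask/_shift triplets and their upper-case helper macros are
 * intended to be OR-ed together when a packet is emitted dword by dword,
 * with each _offset naming the dword index inside the packet rather than
 * a byte offset.  The semaphore opcode value and the destination buffer
 * used below are assumed to come from elsewhere in the driver.
 */
static inline void example_emit_sdma_semaphore(uint32_t *pkt, uint32_t opcode,
					       uint64_t sem_gpu_addr)
{
	/* dword 0: opcode in bits 7:0, signal/mailbox control bits on top */
	pkt[SDMA_PKT_SEMAPHORE_HEADER_op_offset] =
		SDMA_PKT_SEMAPHORE_HEADER_OP(opcode) |
		SDMA_PKT_SEMAPHORE_HEADER_SIGNAL(1) |
		SDMA_PKT_SEMAPHORE_HEADER_MAILBOX(0);
	/* dwords 1-2: 64-bit semaphore address, low half then high half */
	pkt[SDMA_PKT_SEMAPHORE_ADDR_LO_addr_31_0_offset] =
		SDMA_PKT_SEMAPHORE_ADDR_LO_ADDR_31_0((uint32_t)sem_gpu_addr);
	pkt[SDMA_PKT_SEMAPHORE_ADDR_HI_addr_63_32_offset] =
		SDMA_PKT_SEMAPHORE_ADDR_HI_ADDR_63_32((uint32_t)(sem_gpu_addr >> 32));
}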
+#define SDMA_PKT_FENCE_ADDR_LO_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_FENCE_ADDR_LO_addr_31_0_shift 0 +#define SDMA_PKT_FENCE_ADDR_LO_ADDR_31_0(x) (((x) & SDMA_PKT_FENCE_ADDR_LO_addr_31_0_mask) << SDMA_PKT_FENCE_ADDR_LO_addr_31_0_shift) + +/*define for ADDR_HI word*/ +/*define for addr_63_32 field*/ +#define SDMA_PKT_FENCE_ADDR_HI_addr_63_32_offset 2 +#define SDMA_PKT_FENCE_ADDR_HI_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_FENCE_ADDR_HI_addr_63_32_shift 0 +#define SDMA_PKT_FENCE_ADDR_HI_ADDR_63_32(x) (((x) & SDMA_PKT_FENCE_ADDR_HI_addr_63_32_mask) << SDMA_PKT_FENCE_ADDR_HI_addr_63_32_shift) + +/*define for DATA word*/ +/*define for data field*/ +#define SDMA_PKT_FENCE_DATA_data_offset 3 +#define SDMA_PKT_FENCE_DATA_data_mask 0xFFFFFFFF +#define SDMA_PKT_FENCE_DATA_data_shift 0 +#define SDMA_PKT_FENCE_DATA_DATA(x) (((x) & SDMA_PKT_FENCE_DATA_data_mask) << SDMA_PKT_FENCE_DATA_data_shift) + + +/* +** Definitions for SDMA_PKT_SRBM_WRITE packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_SRBM_WRITE_HEADER_op_offset 0 +#define SDMA_PKT_SRBM_WRITE_HEADER_op_mask 0x000000FF +#define SDMA_PKT_SRBM_WRITE_HEADER_op_shift 0 +#define SDMA_PKT_SRBM_WRITE_HEADER_OP(x) (((x) & SDMA_PKT_SRBM_WRITE_HEADER_op_mask) << SDMA_PKT_SRBM_WRITE_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_SRBM_WRITE_HEADER_sub_op_offset 0 +#define SDMA_PKT_SRBM_WRITE_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_SRBM_WRITE_HEADER_sub_op_shift 8 +#define SDMA_PKT_SRBM_WRITE_HEADER_SUB_OP(x) (((x) & SDMA_PKT_SRBM_WRITE_HEADER_sub_op_mask) << SDMA_PKT_SRBM_WRITE_HEADER_sub_op_shift) + +/*define for byte_en field*/ +#define SDMA_PKT_SRBM_WRITE_HEADER_byte_en_offset 0 +#define SDMA_PKT_SRBM_WRITE_HEADER_byte_en_mask 0x0000000F +#define SDMA_PKT_SRBM_WRITE_HEADER_byte_en_shift 28 +#define SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(x) (((x) & SDMA_PKT_SRBM_WRITE_HEADER_byte_en_mask) << SDMA_PKT_SRBM_WRITE_HEADER_byte_en_shift) + +/*define for ADDR word*/ +/*define for addr field*/ +#define SDMA_PKT_SRBM_WRITE_ADDR_addr_offset 1 +#define SDMA_PKT_SRBM_WRITE_ADDR_addr_mask 0x0000FFFF +#define SDMA_PKT_SRBM_WRITE_ADDR_addr_shift 0 +#define SDMA_PKT_SRBM_WRITE_ADDR_ADDR(x) (((x) & SDMA_PKT_SRBM_WRITE_ADDR_addr_mask) << SDMA_PKT_SRBM_WRITE_ADDR_addr_shift) + +/*define for DATA word*/ +/*define for data field*/ +#define SDMA_PKT_SRBM_WRITE_DATA_data_offset 2 +#define SDMA_PKT_SRBM_WRITE_DATA_data_mask 0xFFFFFFFF +#define SDMA_PKT_SRBM_WRITE_DATA_data_shift 0 +#define SDMA_PKT_SRBM_WRITE_DATA_DATA(x) (((x) & SDMA_PKT_SRBM_WRITE_DATA_data_mask) << SDMA_PKT_SRBM_WRITE_DATA_data_shift) + + +/* +** Definitions for SDMA_PKT_PRE_EXE packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_PRE_EXE_HEADER_op_offset 0 +#define SDMA_PKT_PRE_EXE_HEADER_op_mask 0x000000FF +#define SDMA_PKT_PRE_EXE_HEADER_op_shift 0 +#define SDMA_PKT_PRE_EXE_HEADER_OP(x) (((x) & SDMA_PKT_PRE_EXE_HEADER_op_mask) << SDMA_PKT_PRE_EXE_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_PRE_EXE_HEADER_sub_op_offset 0 +#define SDMA_PKT_PRE_EXE_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_PRE_EXE_HEADER_sub_op_shift 8 +#define SDMA_PKT_PRE_EXE_HEADER_SUB_OP(x) (((x) & SDMA_PKT_PRE_EXE_HEADER_sub_op_mask) << SDMA_PKT_PRE_EXE_HEADER_sub_op_shift) + +/*define for dev_sel field*/ +#define SDMA_PKT_PRE_EXE_HEADER_dev_sel_offset 0 +#define SDMA_PKT_PRE_EXE_HEADER_dev_sel_mask 0x000000FF +#define SDMA_PKT_PRE_EXE_HEADER_dev_sel_shift 16 +#define SDMA_PKT_PRE_EXE_HEADER_DEV_SEL(x) (((x) & 
SDMA_PKT_PRE_EXE_HEADER_dev_sel_mask) << SDMA_PKT_PRE_EXE_HEADER_dev_sel_shift) + +/*define for EXEC_COUNT word*/ +/*define for exec_count field*/ +#define SDMA_PKT_PRE_EXE_EXEC_COUNT_exec_count_offset 1 +#define SDMA_PKT_PRE_EXE_EXEC_COUNT_exec_count_mask 0x00003FFF +#define SDMA_PKT_PRE_EXE_EXEC_COUNT_exec_count_shift 0 +#define SDMA_PKT_PRE_EXE_EXEC_COUNT_EXEC_COUNT(x) (((x) & SDMA_PKT_PRE_EXE_EXEC_COUNT_exec_count_mask) << SDMA_PKT_PRE_EXE_EXEC_COUNT_exec_count_shift) + + +/* +** Definitions for SDMA_PKT_COND_EXE packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_COND_EXE_HEADER_op_offset 0 +#define SDMA_PKT_COND_EXE_HEADER_op_mask 0x000000FF +#define SDMA_PKT_COND_EXE_HEADER_op_shift 0 +#define SDMA_PKT_COND_EXE_HEADER_OP(x) (((x) & SDMA_PKT_COND_EXE_HEADER_op_mask) << SDMA_PKT_COND_EXE_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_COND_EXE_HEADER_sub_op_offset 0 +#define SDMA_PKT_COND_EXE_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_COND_EXE_HEADER_sub_op_shift 8 +#define SDMA_PKT_COND_EXE_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COND_EXE_HEADER_sub_op_mask) << SDMA_PKT_COND_EXE_HEADER_sub_op_shift) + +/*define for ADDR_LO word*/ +/*define for addr_31_0 field*/ +#define SDMA_PKT_COND_EXE_ADDR_LO_addr_31_0_offset 1 +#define SDMA_PKT_COND_EXE_ADDR_LO_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_COND_EXE_ADDR_LO_addr_31_0_shift 0 +#define SDMA_PKT_COND_EXE_ADDR_LO_ADDR_31_0(x) (((x) & SDMA_PKT_COND_EXE_ADDR_LO_addr_31_0_mask) << SDMA_PKT_COND_EXE_ADDR_LO_addr_31_0_shift) + +/*define for ADDR_HI word*/ +/*define for addr_63_32 field*/ +#define SDMA_PKT_COND_EXE_ADDR_HI_addr_63_32_offset 2 +#define SDMA_PKT_COND_EXE_ADDR_HI_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_COND_EXE_ADDR_HI_addr_63_32_shift 0 +#define SDMA_PKT_COND_EXE_ADDR_HI_ADDR_63_32(x) (((x) & SDMA_PKT_COND_EXE_ADDR_HI_addr_63_32_mask) << SDMA_PKT_COND_EXE_ADDR_HI_addr_63_32_shift) + +/*define for REFERENCE word*/ +/*define for reference field*/ +#define SDMA_PKT_COND_EXE_REFERENCE_reference_offset 3 +#define SDMA_PKT_COND_EXE_REFERENCE_reference_mask 0xFFFFFFFF +#define SDMA_PKT_COND_EXE_REFERENCE_reference_shift 0 +#define SDMA_PKT_COND_EXE_REFERENCE_REFERENCE(x) (((x) & SDMA_PKT_COND_EXE_REFERENCE_reference_mask) << SDMA_PKT_COND_EXE_REFERENCE_reference_shift) + +/*define for EXEC_COUNT word*/ +/*define for exec_count field*/ +#define SDMA_PKT_COND_EXE_EXEC_COUNT_exec_count_offset 4 +#define SDMA_PKT_COND_EXE_EXEC_COUNT_exec_count_mask 0x00003FFF +#define SDMA_PKT_COND_EXE_EXEC_COUNT_exec_count_shift 0 +#define SDMA_PKT_COND_EXE_EXEC_COUNT_EXEC_COUNT(x) (((x) & SDMA_PKT_COND_EXE_EXEC_COUNT_exec_count_mask) << SDMA_PKT_COND_EXE_EXEC_COUNT_exec_count_shift) + + +/* +** Definitions for SDMA_PKT_CONSTANT_FILL packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_CONSTANT_FILL_HEADER_op_offset 0 +#define SDMA_PKT_CONSTANT_FILL_HEADER_op_mask 0x000000FF +#define SDMA_PKT_CONSTANT_FILL_HEADER_op_shift 0 +#define SDMA_PKT_CONSTANT_FILL_HEADER_OP(x) (((x) & SDMA_PKT_CONSTANT_FILL_HEADER_op_mask) << SDMA_PKT_CONSTANT_FILL_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_CONSTANT_FILL_HEADER_sub_op_offset 0 +#define SDMA_PKT_CONSTANT_FILL_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_CONSTANT_FILL_HEADER_sub_op_shift 8 +#define SDMA_PKT_CONSTANT_FILL_HEADER_SUB_OP(x) (((x) & SDMA_PKT_CONSTANT_FILL_HEADER_sub_op_mask) << SDMA_PKT_CONSTANT_FILL_HEADER_sub_op_shift) + +/*define for sw field*/ +#define 
SDMA_PKT_CONSTANT_FILL_HEADER_sw_offset 0 +#define SDMA_PKT_CONSTANT_FILL_HEADER_sw_mask 0x00000003 +#define SDMA_PKT_CONSTANT_FILL_HEADER_sw_shift 16 +#define SDMA_PKT_CONSTANT_FILL_HEADER_SW(x) (((x) & SDMA_PKT_CONSTANT_FILL_HEADER_sw_mask) << SDMA_PKT_CONSTANT_FILL_HEADER_sw_shift) + +/*define for fillsize field*/ +#define SDMA_PKT_CONSTANT_FILL_HEADER_fillsize_offset 0 +#define SDMA_PKT_CONSTANT_FILL_HEADER_fillsize_mask 0x00000003 +#define SDMA_PKT_CONSTANT_FILL_HEADER_fillsize_shift 30 +#define SDMA_PKT_CONSTANT_FILL_HEADER_FILLSIZE(x) (((x) & SDMA_PKT_CONSTANT_FILL_HEADER_fillsize_mask) << SDMA_PKT_CONSTANT_FILL_HEADER_fillsize_shift) + +/*define for DST_ADDR_LO word*/ +/*define for dst_addr_31_0 field*/ +#define SDMA_PKT_CONSTANT_FILL_DST_ADDR_LO_dst_addr_31_0_offset 1 +#define SDMA_PKT_CONSTANT_FILL_DST_ADDR_LO_dst_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_CONSTANT_FILL_DST_ADDR_LO_dst_addr_31_0_shift 0 +#define SDMA_PKT_CONSTANT_FILL_DST_ADDR_LO_DST_ADDR_31_0(x) (((x) & SDMA_PKT_CONSTANT_FILL_DST_ADDR_LO_dst_addr_31_0_mask) << SDMA_PKT_CONSTANT_FILL_DST_ADDR_LO_dst_addr_31_0_shift) + +/*define for DST_ADDR_HI word*/ +/*define for dst_addr_63_32 field*/ +#define SDMA_PKT_CONSTANT_FILL_DST_ADDR_HI_dst_addr_63_32_offset 2 +#define SDMA_PKT_CONSTANT_FILL_DST_ADDR_HI_dst_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_CONSTANT_FILL_DST_ADDR_HI_dst_addr_63_32_shift 0 +#define SDMA_PKT_CONSTANT_FILL_DST_ADDR_HI_DST_ADDR_63_32(x) (((x) & SDMA_PKT_CONSTANT_FILL_DST_ADDR_HI_dst_addr_63_32_mask) << SDMA_PKT_CONSTANT_FILL_DST_ADDR_HI_dst_addr_63_32_shift) + +/*define for DATA word*/ +/*define for src_data_31_0 field*/ +#define SDMA_PKT_CONSTANT_FILL_DATA_src_data_31_0_offset 3 +#define SDMA_PKT_CONSTANT_FILL_DATA_src_data_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_CONSTANT_FILL_DATA_src_data_31_0_shift 0 +#define SDMA_PKT_CONSTANT_FILL_DATA_SRC_DATA_31_0(x) (((x) & SDMA_PKT_CONSTANT_FILL_DATA_src_data_31_0_mask) << SDMA_PKT_CONSTANT_FILL_DATA_src_data_31_0_shift) + +/*define for COUNT word*/ +/*define for count field*/ +#define SDMA_PKT_CONSTANT_FILL_COUNT_count_offset 4 +#define SDMA_PKT_CONSTANT_FILL_COUNT_count_mask 0x003FFFFF +#define SDMA_PKT_CONSTANT_FILL_COUNT_count_shift 0 +#define SDMA_PKT_CONSTANT_FILL_COUNT_COUNT(x) (((x) & SDMA_PKT_CONSTANT_FILL_COUNT_count_mask) << SDMA_PKT_CONSTANT_FILL_COUNT_count_shift) + + +/* +** Definitions for SDMA_PKT_POLL_REGMEM packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_POLL_REGMEM_HEADER_op_offset 0 +#define SDMA_PKT_POLL_REGMEM_HEADER_op_mask 0x000000FF +#define SDMA_PKT_POLL_REGMEM_HEADER_op_shift 0 +#define SDMA_PKT_POLL_REGMEM_HEADER_OP(x) (((x) & SDMA_PKT_POLL_REGMEM_HEADER_op_mask) << SDMA_PKT_POLL_REGMEM_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_POLL_REGMEM_HEADER_sub_op_offset 0 +#define SDMA_PKT_POLL_REGMEM_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_POLL_REGMEM_HEADER_sub_op_shift 8 +#define SDMA_PKT_POLL_REGMEM_HEADER_SUB_OP(x) (((x) & SDMA_PKT_POLL_REGMEM_HEADER_sub_op_mask) << SDMA_PKT_POLL_REGMEM_HEADER_sub_op_shift) + +/*define for hdp_flush field*/ +#define SDMA_PKT_POLL_REGMEM_HEADER_hdp_flush_offset 0 +#define SDMA_PKT_POLL_REGMEM_HEADER_hdp_flush_mask 0x00000001 +#define SDMA_PKT_POLL_REGMEM_HEADER_hdp_flush_shift 26 +#define SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(x) (((x) & SDMA_PKT_POLL_REGMEM_HEADER_hdp_flush_mask) << SDMA_PKT_POLL_REGMEM_HEADER_hdp_flush_shift) + +/*define for func field*/ +#define SDMA_PKT_POLL_REGMEM_HEADER_func_offset 0 +#define 
SDMA_PKT_POLL_REGMEM_HEADER_func_mask 0x00000007 +#define SDMA_PKT_POLL_REGMEM_HEADER_func_shift 28 +#define SDMA_PKT_POLL_REGMEM_HEADER_FUNC(x) (((x) & SDMA_PKT_POLL_REGMEM_HEADER_func_mask) << SDMA_PKT_POLL_REGMEM_HEADER_func_shift) + +/*define for mem_poll field*/ +#define SDMA_PKT_POLL_REGMEM_HEADER_mem_poll_offset 0 +#define SDMA_PKT_POLL_REGMEM_HEADER_mem_poll_mask 0x00000001 +#define SDMA_PKT_POLL_REGMEM_HEADER_mem_poll_shift 31 +#define SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(x) (((x) & SDMA_PKT_POLL_REGMEM_HEADER_mem_poll_mask) << SDMA_PKT_POLL_REGMEM_HEADER_mem_poll_shift) + +/*define for ADDR_LO word*/ +/*define for addr_31_0 field*/ +#define SDMA_PKT_POLL_REGMEM_ADDR_LO_addr_31_0_offset 1 +#define SDMA_PKT_POLL_REGMEM_ADDR_LO_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_POLL_REGMEM_ADDR_LO_addr_31_0_shift 0 +#define SDMA_PKT_POLL_REGMEM_ADDR_LO_ADDR_31_0(x) (((x) & SDMA_PKT_POLL_REGMEM_ADDR_LO_addr_31_0_mask) << SDMA_PKT_POLL_REGMEM_ADDR_LO_addr_31_0_shift) + +/*define for ADDR_HI word*/ +/*define for addr_63_32 field*/ +#define SDMA_PKT_POLL_REGMEM_ADDR_HI_addr_63_32_offset 2 +#define SDMA_PKT_POLL_REGMEM_ADDR_HI_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_POLL_REGMEM_ADDR_HI_addr_63_32_shift 0 +#define SDMA_PKT_POLL_REGMEM_ADDR_HI_ADDR_63_32(x) (((x) & SDMA_PKT_POLL_REGMEM_ADDR_HI_addr_63_32_mask) << SDMA_PKT_POLL_REGMEM_ADDR_HI_addr_63_32_shift) + +/*define for VALUE word*/ +/*define for value field*/ +#define SDMA_PKT_POLL_REGMEM_VALUE_value_offset 3 +#define SDMA_PKT_POLL_REGMEM_VALUE_value_mask 0xFFFFFFFF +#define SDMA_PKT_POLL_REGMEM_VALUE_value_shift 0 +#define SDMA_PKT_POLL_REGMEM_VALUE_VALUE(x) (((x) & SDMA_PKT_POLL_REGMEM_VALUE_value_mask) << SDMA_PKT_POLL_REGMEM_VALUE_value_shift) + +/*define for MASK word*/ +/*define for mask field*/ +#define SDMA_PKT_POLL_REGMEM_MASK_mask_offset 4 +#define SDMA_PKT_POLL_REGMEM_MASK_mask_mask 0xFFFFFFFF +#define SDMA_PKT_POLL_REGMEM_MASK_mask_shift 0 +#define SDMA_PKT_POLL_REGMEM_MASK_MASK(x) (((x) & SDMA_PKT_POLL_REGMEM_MASK_mask_mask) << SDMA_PKT_POLL_REGMEM_MASK_mask_shift) + +/*define for DW5 word*/ +/*define for interval field*/ +#define SDMA_PKT_POLL_REGMEM_DW5_interval_offset 5 +#define SDMA_PKT_POLL_REGMEM_DW5_interval_mask 0x0000FFFF +#define SDMA_PKT_POLL_REGMEM_DW5_interval_shift 0 +#define SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(x) (((x) & SDMA_PKT_POLL_REGMEM_DW5_interval_mask) << SDMA_PKT_POLL_REGMEM_DW5_interval_shift) + +/*define for retry_count field*/ +#define SDMA_PKT_POLL_REGMEM_DW5_retry_count_offset 5 +#define SDMA_PKT_POLL_REGMEM_DW5_retry_count_mask 0x00000FFF +#define SDMA_PKT_POLL_REGMEM_DW5_retry_count_shift 16 +#define SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(x) (((x) & SDMA_PKT_POLL_REGMEM_DW5_retry_count_mask) << SDMA_PKT_POLL_REGMEM_DW5_retry_count_shift) + + +/* +** Definitions for SDMA_PKT_TIMESTAMP_SET packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_TIMESTAMP_SET_HEADER_op_offset 0 +#define SDMA_PKT_TIMESTAMP_SET_HEADER_op_mask 0x000000FF +#define SDMA_PKT_TIMESTAMP_SET_HEADER_op_shift 0 +#define SDMA_PKT_TIMESTAMP_SET_HEADER_OP(x) (((x) & SDMA_PKT_TIMESTAMP_SET_HEADER_op_mask) << SDMA_PKT_TIMESTAMP_SET_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_TIMESTAMP_SET_HEADER_sub_op_offset 0 +#define SDMA_PKT_TIMESTAMP_SET_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_TIMESTAMP_SET_HEADER_sub_op_shift 8 +#define SDMA_PKT_TIMESTAMP_SET_HEADER_SUB_OP(x) (((x) & SDMA_PKT_TIMESTAMP_SET_HEADER_sub_op_mask) << SDMA_PKT_TIMESTAMP_SET_HEADER_sub_op_shift) + +/*define 
for INIT_DATA_LO word*/ +/*define for init_data_31_0 field*/ +#define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_LO_init_data_31_0_offset 1 +#define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_LO_init_data_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_LO_init_data_31_0_shift 0 +#define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_LO_INIT_DATA_31_0(x) (((x) & SDMA_PKT_TIMESTAMP_SET_INIT_DATA_LO_init_data_31_0_mask) << SDMA_PKT_TIMESTAMP_SET_INIT_DATA_LO_init_data_31_0_shift) + +/*define for INIT_DATA_HI word*/ +/*define for init_data_63_32 field*/ +#define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_HI_init_data_63_32_offset 2 +#define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_HI_init_data_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_HI_init_data_63_32_shift 0 +#define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_HI_INIT_DATA_63_32(x) (((x) & SDMA_PKT_TIMESTAMP_SET_INIT_DATA_HI_init_data_63_32_mask) << SDMA_PKT_TIMESTAMP_SET_INIT_DATA_HI_init_data_63_32_shift) + + +/* +** Definitions for SDMA_PKT_TIMESTAMP_GET packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_TIMESTAMP_GET_HEADER_op_offset 0 +#define SDMA_PKT_TIMESTAMP_GET_HEADER_op_mask 0x000000FF +#define SDMA_PKT_TIMESTAMP_GET_HEADER_op_shift 0 +#define SDMA_PKT_TIMESTAMP_GET_HEADER_OP(x) (((x) & SDMA_PKT_TIMESTAMP_GET_HEADER_op_mask) << SDMA_PKT_TIMESTAMP_GET_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_TIMESTAMP_GET_HEADER_sub_op_offset 0 +#define SDMA_PKT_TIMESTAMP_GET_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_TIMESTAMP_GET_HEADER_sub_op_shift 8 +#define SDMA_PKT_TIMESTAMP_GET_HEADER_SUB_OP(x) (((x) & SDMA_PKT_TIMESTAMP_GET_HEADER_sub_op_mask) << SDMA_PKT_TIMESTAMP_GET_HEADER_sub_op_shift) + +/*define for WRITE_ADDR_LO word*/ +/*define for write_addr_31_3 field*/ +#define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_LO_write_addr_31_3_offset 1 +#define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_LO_write_addr_31_3_mask 0x1FFFFFFF +#define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_LO_write_addr_31_3_shift 3 +#define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_LO_WRITE_ADDR_31_3(x) (((x) & SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_LO_write_addr_31_3_mask) << SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_LO_write_addr_31_3_shift) + +/*define for WRITE_ADDR_HI word*/ +/*define for write_addr_63_32 field*/ +#define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_HI_write_addr_63_32_offset 2 +#define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_HI_write_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_HI_write_addr_63_32_shift 0 +#define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_HI_WRITE_ADDR_63_32(x) (((x) & SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_HI_write_addr_63_32_mask) << SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_HI_write_addr_63_32_shift) + + +/* +** Definitions for SDMA_PKT_TIMESTAMP_GET_GLOBAL packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_op_offset 0 +#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_op_mask 0x000000FF +#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_op_shift 0 +#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_OP(x) (((x) & SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_op_mask) << SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_sub_op_offset 0 +#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_sub_op_shift 8 +#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_SUB_OP(x) (((x) & SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_sub_op_mask) << SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_sub_op_shift) + +/*define 
for WRITE_ADDR_LO word*/ +/*define for write_addr_31_3 field*/ +#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_LO_write_addr_31_3_offset 1 +#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_LO_write_addr_31_3_mask 0x1FFFFFFF +#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_LO_write_addr_31_3_shift 3 +#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_LO_WRITE_ADDR_31_3(x) (((x) & SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_LO_write_addr_31_3_mask) << SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_LO_write_addr_31_3_shift) + +/*define for WRITE_ADDR_HI word*/ +/*define for write_addr_63_32 field*/ +#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_HI_write_addr_63_32_offset 2 +#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_HI_write_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_HI_write_addr_63_32_shift 0 +#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_HI_WRITE_ADDR_63_32(x) (((x) & SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_HI_write_addr_63_32_mask) << SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_HI_write_addr_63_32_shift) + + +/* +** Definitions for SDMA_PKT_TRAP packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_TRAP_HEADER_op_offset 0 +#define SDMA_PKT_TRAP_HEADER_op_mask 0x000000FF +#define SDMA_PKT_TRAP_HEADER_op_shift 0 +#define SDMA_PKT_TRAP_HEADER_OP(x) (((x) & SDMA_PKT_TRAP_HEADER_op_mask) << SDMA_PKT_TRAP_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_TRAP_HEADER_sub_op_offset 0 +#define SDMA_PKT_TRAP_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_TRAP_HEADER_sub_op_shift 8 +#define SDMA_PKT_TRAP_HEADER_SUB_OP(x) (((x) & SDMA_PKT_TRAP_HEADER_sub_op_mask) << SDMA_PKT_TRAP_HEADER_sub_op_shift) + +/*define for INT_CONTEXT word*/ +/*define for int_context field*/ +#define SDMA_PKT_TRAP_INT_CONTEXT_int_context_offset 1 +#define SDMA_PKT_TRAP_INT_CONTEXT_int_context_mask 0x0FFFFFFF +#define SDMA_PKT_TRAP_INT_CONTEXT_int_context_shift 0 +#define SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(x) (((x) & SDMA_PKT_TRAP_INT_CONTEXT_int_context_mask) << SDMA_PKT_TRAP_INT_CONTEXT_int_context_shift) + + +/* +** Definitions for SDMA_PKT_NOP packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_NOP_HEADER_op_offset 0 +#define SDMA_PKT_NOP_HEADER_op_mask 0x000000FF +#define SDMA_PKT_NOP_HEADER_op_shift 0 +#define SDMA_PKT_NOP_HEADER_OP(x) (((x) & SDMA_PKT_NOP_HEADER_op_mask) << SDMA_PKT_NOP_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_NOP_HEADER_sub_op_offset 0 +#define SDMA_PKT_NOP_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_NOP_HEADER_sub_op_shift 8 +#define SDMA_PKT_NOP_HEADER_SUB_OP(x) (((x) & SDMA_PKT_NOP_HEADER_sub_op_mask) << SDMA_PKT_NOP_HEADER_sub_op_shift) + + +#endif /* __ICELAND_SDMA_PKT_OPEN_H_ */ diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c new file mode 100644 index 000000000000..c6f1e2f12b5f --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c @@ -0,0 +1,675 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include <linux/firmware.h> +#include "drmP.h" +#include "amdgpu.h" +#include "ppsmc.h" +#include "iceland_smumgr.h" +#include "smu_ucode_xfer_vi.h" +#include "amdgpu_ucode.h" + +#include "smu/smu_7_1_1_d.h" +#include "smu/smu_7_1_1_sh_mask.h" + +#define ICELAND_SMC_SIZE 0x20000 + +static int iceland_set_smc_sram_address(struct amdgpu_device *adev, + uint32_t smc_address, uint32_t limit) +{ + uint32_t val; + + if (smc_address & 3) + return -EINVAL; + + if ((smc_address + 3) > limit) + return -EINVAL; + + WREG32(mmSMC_IND_INDEX_0, smc_address); + + val = RREG32(mmSMC_IND_ACCESS_CNTL); + val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); + WREG32(mmSMC_IND_ACCESS_CNTL, val); + + return 0; +} + +static int iceland_copy_bytes_to_smc(struct amdgpu_device *adev, + uint32_t smc_start_address, + const uint8_t *src, + uint32_t byte_count, uint32_t limit) +{ + uint32_t addr; + uint32_t data, orig_data; + int result = 0; + uint32_t extra_shift; + unsigned long flags; + + if (smc_start_address & 3) + return -EINVAL; + + if ((smc_start_address + byte_count) > limit) + return -EINVAL; + + addr = smc_start_address; + + spin_lock_irqsave(&adev->smc_idx_lock, flags); + while (byte_count >= 4) { + /* Bytes are written into the SMC address space with the MSB first */ + data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3]; + + result = iceland_set_smc_sram_address(adev, addr, limit); + + if (result) + goto out; + + WREG32(mmSMC_IND_DATA_0, data); + + src += 4; + byte_count -= 4; + addr += 4; + } + + if (0 != byte_count) { + /* Now write odd bytes left, do a read modify write cycle */ + data = 0; + + result = iceland_set_smc_sram_address(adev, addr, limit); + if (result) + goto out; + + orig_data = RREG32(mmSMC_IND_DATA_0); + extra_shift = 8 * (4 - byte_count); + + while (byte_count > 0) { + data = (data << 8) + *src++; + byte_count--; + } + + data <<= extra_shift; + data |= (orig_data & ~((~0UL) << extra_shift)); + + result = iceland_set_smc_sram_address(adev, addr, limit); + if (result) + goto out; + + WREG32(mmSMC_IND_DATA_0, data); + } + +out: + spin_unlock_irqrestore(&adev->smc_idx_lock, flags); + return result; +} + +void iceland_start_smc(struct amdgpu_device *adev) +{ + uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); + + val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0); + WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); +} + +void iceland_reset_smc(struct amdgpu_device *adev) +{ + uint32_t val = 
RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); + + val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1); + WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); +} + +static int iceland_program_jump_on_start(struct amdgpu_device *adev) +{ + static unsigned char data[] = {0xE0, 0x00, 0x80, 0x40}; + iceland_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1); + + return 0; +} + +void iceland_stop_smc_clock(struct amdgpu_device *adev) +{ + uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); + + val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1); + WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val); +} + +void iceland_start_smc_clock(struct amdgpu_device *adev) +{ + uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); + + val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); + WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val); +} + +static bool iceland_is_smc_ram_running(struct amdgpu_device *adev) +{ + uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); + val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable); + + return ((0 == val) && (0x20100 <= RREG32_SMC(ixSMC_PC_C))); +} + +static int wait_smu_response(struct amdgpu_device *adev) +{ + int i; + uint32_t val; + + for (i = 0; i < adev->usec_timeout; i++) { + val = RREG32(mmSMC_RESP_0); + if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP)) + break; + udelay(1); + } + + if (i == adev->usec_timeout) + return -EINVAL; + + return 0; +} + +static int iceland_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg) +{ + if (!iceland_is_smc_ram_running(adev)) + return -EINVAL; + + if (wait_smu_response(adev)) { + DRM_ERROR("Failed to send previous message\n"); + return -EINVAL; + } + + WREG32(mmSMC_MESSAGE_0, msg); + + if (wait_smu_response(adev)) { + DRM_ERROR("Failed to send message\n"); + return -EINVAL; + } + + return 0; +} + +static int iceland_send_msg_to_smc_without_waiting(struct amdgpu_device *adev, + PPSMC_Msg msg) +{ + if (!iceland_is_smc_ram_running(adev)) + return -EINVAL; + + if (wait_smu_response(adev)) { + DRM_ERROR("Failed to send previous message\n"); + return -EINVAL; + } + + WREG32(mmSMC_MESSAGE_0, msg); + + return 0; +} + +static int iceland_send_msg_to_smc_with_parameter(struct amdgpu_device *adev, + PPSMC_Msg msg, + uint32_t parameter) +{ + WREG32(mmSMC_MSG_ARG_0, parameter); + + return iceland_send_msg_to_smc(adev, msg); +} + +static int iceland_send_msg_to_smc_with_parameter_without_waiting( + struct amdgpu_device *adev, + PPSMC_Msg msg, uint32_t parameter) +{ + WREG32(mmSMC_MSG_ARG_0, parameter); + + return iceland_send_msg_to_smc_without_waiting(adev, msg); +} + +#if 0 /* not used yet */ +static int iceland_wait_for_smc_inactive(struct amdgpu_device *adev) +{ + int i; + uint32_t val; + + if (!iceland_is_smc_ram_running(adev)) + return -EINVAL; + + for (i = 0; i < adev->usec_timeout; i++) { + val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); + if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0) + break; + udelay(1); + } + + if (i == adev->usec_timeout) + return -EINVAL; + + return 0; +} +#endif + +static int iceland_smu_upload_firmware_image(struct amdgpu_device *adev) +{ + const struct smc_firmware_header_v1_0 *hdr; + uint32_t ucode_size; + uint32_t ucode_start_address; + const uint8_t *src; + uint32_t val; + uint32_t byte_count; + uint32_t data; + unsigned long flags; + int i; + + if (!adev->pm.fw) + return -EINVAL; + + hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data; + amdgpu_ucode_print_smc_hdr(&hdr->header); + + adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version); + ucode_size = 
le32_to_cpu(hdr->header.ucode_size_bytes); + ucode_start_address = le32_to_cpu(hdr->ucode_start_addr); + src = (const uint8_t *) + (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + + if (ucode_size & 3) { + DRM_ERROR("SMC ucode is not 4 bytes aligned\n"); + return -EINVAL; + } + + if (ucode_size > ICELAND_SMC_SIZE) { + DRM_ERROR("SMC address is beyond the SMC RAM area\n"); + return -EINVAL; + } + + for (i = 0; i < adev->usec_timeout; i++) { + val = RREG32_SMC(ixRCU_UC_EVENTS); + if (REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done) == 0) + break; + udelay(1); + } + val = RREG32_SMC(ixSMC_SYSCON_MISC_CNTL); + WREG32_SMC(ixSMC_SYSCON_MISC_CNTL, val | 1); + + iceland_stop_smc_clock(adev); + iceland_reset_smc(adev); + + spin_lock_irqsave(&adev->smc_idx_lock, flags); + WREG32(mmSMC_IND_INDEX_0, ucode_start_address); + + val = RREG32(mmSMC_IND_ACCESS_CNTL); + val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1); + WREG32(mmSMC_IND_ACCESS_CNTL, val); + + byte_count = ucode_size; + while (byte_count >= 4) { + data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3]; + WREG32(mmSMC_IND_DATA_0, data); + src += 4; + byte_count -= 4; + } + val = RREG32(mmSMC_IND_ACCESS_CNTL); + val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); + WREG32(mmSMC_IND_ACCESS_CNTL, val); + spin_unlock_irqrestore(&adev->smc_idx_lock, flags); + + return 0; +} + +#if 0 /* not used yet */ +static int iceland_read_smc_sram_dword(struct amdgpu_device *adev, + uint32_t smc_address, + uint32_t *value, + uint32_t limit) +{ + int result; + unsigned long flags; + + spin_lock_irqsave(&adev->smc_idx_lock, flags); + result = iceland_set_smc_sram_address(adev, smc_address, limit); + if (result == 0) + *value = RREG32(mmSMC_IND_DATA_0); + spin_unlock_irqrestore(&adev->smc_idx_lock, flags); + return result; +} + +static int iceland_write_smc_sram_dword(struct amdgpu_device *adev, + uint32_t smc_address, + uint32_t value, + uint32_t limit) +{ + int result; + unsigned long flags; + + spin_lock_irqsave(&adev->smc_idx_lock, flags); + result = iceland_set_smc_sram_address(adev, smc_address, limit); + if (result == 0) + WREG32(mmSMC_IND_DATA_0, value); + spin_unlock_irqrestore(&adev->smc_idx_lock, flags); + return result; +} + +static int iceland_smu_stop_smc(struct amdgpu_device *adev) +{ + iceland_reset_smc(adev); + iceland_stop_smc_clock(adev); + + return 0; +} +#endif + +static int iceland_smu_start_smc(struct amdgpu_device *adev) +{ + int i; + uint32_t val; + + iceland_program_jump_on_start(adev); + iceland_start_smc_clock(adev); + iceland_start_smc(adev); + + for (i = 0; i < adev->usec_timeout; i++) { + val = RREG32_SMC(ixFIRMWARE_FLAGS); + if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED) == 1) + break; + udelay(1); + } + return 0; +} + +static enum AMDGPU_UCODE_ID iceland_convert_fw_type(uint32_t fw_type) +{ + switch (fw_type) { + case UCODE_ID_SDMA0: + return AMDGPU_UCODE_ID_SDMA0; + case UCODE_ID_SDMA1: + return AMDGPU_UCODE_ID_SDMA1; + case UCODE_ID_CP_CE: + return AMDGPU_UCODE_ID_CP_CE; + case UCODE_ID_CP_PFP: + return AMDGPU_UCODE_ID_CP_PFP; + case UCODE_ID_CP_ME: + return AMDGPU_UCODE_ID_CP_ME; + case UCODE_ID_CP_MEC: + case UCODE_ID_CP_MEC_JT1: + return AMDGPU_UCODE_ID_CP_MEC1; + case UCODE_ID_CP_MEC_JT2: + return AMDGPU_UCODE_ID_CP_MEC2; + case UCODE_ID_RLC_G: + return AMDGPU_UCODE_ID_RLC_G; + default: + DRM_ERROR("ucode type is out of range!\n"); + return AMDGPU_UCODE_ID_MAXIMUM; + } +} + +static uint32_t iceland_smu_get_mask_for_fw_type(uint32_t 
fw_type) +{ + switch (fw_type) { + case AMDGPU_UCODE_ID_SDMA0: + return UCODE_ID_SDMA0_MASK; + case AMDGPU_UCODE_ID_SDMA1: + return UCODE_ID_SDMA1_MASK; + case AMDGPU_UCODE_ID_CP_CE: + return UCODE_ID_CP_CE_MASK; + case AMDGPU_UCODE_ID_CP_PFP: + return UCODE_ID_CP_PFP_MASK; + case AMDGPU_UCODE_ID_CP_ME: + return UCODE_ID_CP_ME_MASK; + case AMDGPU_UCODE_ID_CP_MEC1: + return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK | UCODE_ID_CP_MEC_JT2_MASK; + case AMDGPU_UCODE_ID_CP_MEC2: + return UCODE_ID_CP_MEC_MASK; + case AMDGPU_UCODE_ID_RLC_G: + return UCODE_ID_RLC_G_MASK; + default: + DRM_ERROR("ucode type is out of range!\n"); + return 0; + } +} + +static int iceland_smu_populate_single_firmware_entry(struct amdgpu_device *adev, + uint32_t fw_type, + struct SMU_Entry *entry) +{ + enum AMDGPU_UCODE_ID id = iceland_convert_fw_type(fw_type); + struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id]; + const struct gfx_firmware_header_v1_0 *header = NULL; + uint64_t gpu_addr; + uint32_t data_size; + + if (ucode->fw == NULL) + return -EINVAL; + + gpu_addr = ucode->mc_addr; + header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data; + data_size = le32_to_cpu(header->header.ucode_size_bytes); + + entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version); + entry->id = (uint16_t)fw_type; + entry->image_addr_high = upper_32_bits(gpu_addr); + entry->image_addr_low = lower_32_bits(gpu_addr); + entry->meta_data_addr_high = 0; + entry->meta_data_addr_low = 0; + entry->data_size_byte = data_size; + entry->num_register_entries = 0; + entry->flags = 0; + + return 0; +} + +static int iceland_smu_request_load_fw(struct amdgpu_device *adev) +{ + struct iceland_smu_private_data *private = (struct iceland_smu_private_data *)adev->smu.priv; + struct SMU_DRAMData_TOC *toc; + uint32_t fw_to_load; + + toc = (struct SMU_DRAMData_TOC *)private->header; + toc->num_entries = 0; + toc->structure_version = 1; + + if (!adev->firmware.smu_load) + return 0; + + if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G, + &toc->entry[toc->num_entries++])) { + DRM_ERROR("Failed to get firmware entry for RLC\n"); + return -EINVAL; + } + + if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE, + &toc->entry[toc->num_entries++])) { + DRM_ERROR("Failed to get firmware entry for CE\n"); + return -EINVAL; + } + + if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP, + &toc->entry[toc->num_entries++])) { + DRM_ERROR("Failed to get firmware entry for PFP\n"); + return -EINVAL; + } + + if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME, + &toc->entry[toc->num_entries++])) { + DRM_ERROR("Failed to get firmware entry for ME\n"); + return -EINVAL; + } + + if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC, + &toc->entry[toc->num_entries++])) { + DRM_ERROR("Failed to get firmware entry for MEC\n"); + return -EINVAL; + } + + if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1, + &toc->entry[toc->num_entries++])) { + DRM_ERROR("Failed to get firmware entry for MEC_JT1\n"); + return -EINVAL; + } + + if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2, + &toc->entry[toc->num_entries++])) { + DRM_ERROR("Failed to get firmware entry for MEC_JT2\n"); + return -EINVAL; + } + + if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0, + &toc->entry[toc->num_entries++])) { + DRM_ERROR("Failed to get firmware entry for SDMA0\n"); + return -EINVAL; + } + + if 
(iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1, + &toc->entry[toc->num_entries++])) { + DRM_ERROR("Failed to get firmware entry for SDMA1\n"); + return -EINVAL; + } + + iceland_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high); + iceland_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low); + + fw_to_load = UCODE_ID_RLC_G_MASK | + UCODE_ID_SDMA0_MASK | + UCODE_ID_SDMA1_MASK | + UCODE_ID_CP_CE_MASK | + UCODE_ID_CP_ME_MASK | + UCODE_ID_CP_PFP_MASK | + UCODE_ID_CP_MEC_MASK | + UCODE_ID_CP_MEC_JT1_MASK | + UCODE_ID_CP_MEC_JT2_MASK; + + if (iceland_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) { + DRM_ERROR("Fail to request SMU load ucode\n"); + return -EINVAL; + } + + return 0; +} + +static int iceland_smu_check_fw_load_finish(struct amdgpu_device *adev, + uint32_t fw_type) +{ + uint32_t fw_mask = iceland_smu_get_mask_for_fw_type(fw_type); + int i; + + for (i = 0; i < adev->usec_timeout; i++) { + if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_27) & fw_mask)) + break; + udelay(1); + } + + if (i == adev->usec_timeout) { + DRM_ERROR("check firmware loading failed\n"); + return -EINVAL; + } + + return 0; +} + +int iceland_smu_start(struct amdgpu_device *adev) +{ + int result; + + result = iceland_smu_upload_firmware_image(adev); + if (result) + return result; + result = iceland_smu_start_smc(adev); + if (result) + return result; + + return iceland_smu_request_load_fw(adev); +} + +static const struct amdgpu_smumgr_funcs iceland_smumgr_funcs = { + .check_fw_load_finish = iceland_smu_check_fw_load_finish, + .request_smu_load_fw = NULL, + .request_smu_specific_fw = NULL, +}; + +int iceland_smu_init(struct amdgpu_device *adev) +{ + struct iceland_smu_private_data *private; + uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096; + struct amdgpu_bo **toc_buf = &adev->smu.toc_buf; + uint64_t mc_addr; + void *toc_buf_ptr; + int ret; + + private = kzalloc(sizeof(struct iceland_smu_private_data), GFP_KERNEL); + if (NULL == private) + return -ENOMEM; + + /* allocate firmware buffers */ + if (adev->firmware.smu_load) + amdgpu_ucode_init_bo(adev); + + adev->smu.priv = private; + adev->smu.fw_flags = 0; + + /* Allocate FW image data structure and header buffer */ + ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE, + true, AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, toc_buf); + if (ret) { + DRM_ERROR("Failed to allocate memory for TOC buffer\n"); + return -ENOMEM; + } + + /* Retrieve GPU address for header buffer and internal buffer */ + ret = amdgpu_bo_reserve(adev->smu.toc_buf, false); + if (ret) { + amdgpu_bo_unref(&adev->smu.toc_buf); + DRM_ERROR("Failed to reserve the TOC buffer\n"); + return -EINVAL; + } + + ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr); + if (ret) { + amdgpu_bo_unreserve(adev->smu.toc_buf); + amdgpu_bo_unref(&adev->smu.toc_buf); + DRM_ERROR("Failed to pin the TOC buffer\n"); + return -EINVAL; + } + + ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr); + if (ret) { + amdgpu_bo_unreserve(adev->smu.toc_buf); + amdgpu_bo_unref(&adev->smu.toc_buf); + DRM_ERROR("Failed to map the TOC buffer\n"); + return -EINVAL; + } + + amdgpu_bo_unreserve(adev->smu.toc_buf); + private->header_addr_low = lower_32_bits(mc_addr); + private->header_addr_high = upper_32_bits(mc_addr); + private->header = toc_buf_ptr; + + adev->smu.smumgr_funcs = &iceland_smumgr_funcs; + + return 0; +} + +int iceland_smu_fini(struct amdgpu_device *adev) +{ + 
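+ /*
+  * Teardown of what iceland_smu_init() set up: drop the TOC buffer
+  * object, free the SMU private data, then release the ucode BO if
+  * firmware buffers were allocated for SMU-based loading.
+  */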
amdgpu_bo_unref(&adev->smu.toc_buf); + kfree(adev->smu.priv); + adev->smu.priv = NULL; + if (adev->firmware.fw_buf) + amdgpu_ucode_fini_bo(adev); + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smumgr.h b/drivers/gpu/drm/amd/amdgpu/iceland_smumgr.h new file mode 100644 index 000000000000..1e0769e110fa --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/iceland_smumgr.h @@ -0,0 +1,41 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef ICELAND_SMUMGR_H +#define ICELAND_SMUMGR_H + +#include "ppsmc.h" + +extern int iceland_smu_init(struct amdgpu_device *adev); +extern int iceland_smu_fini(struct amdgpu_device *adev); +extern int iceland_smu_start(struct amdgpu_device *adev); + +struct iceland_smu_private_data +{ + uint8_t *header; + uint8_t *mec_image; + uint32_t header_addr_high; + uint32_t header_addr_low; +}; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c new file mode 100644 index 000000000000..a83029d548c1 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -0,0 +1,1447 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Alex Deucher + */ +#include +#include +#include "amdgpu.h" +#include "amdgpu_ucode.h" +#include "amdgpu_trace.h" +#include "vi.h" +#include "vid.h" + +#include "oss/oss_2_4_d.h" +#include "oss/oss_2_4_sh_mask.h" + +#include "gmc/gmc_8_1_d.h" +#include "gmc/gmc_8_1_sh_mask.h" + +#include "gca/gfx_8_0_d.h" +#include "gca/gfx_8_0_sh_mask.h" + +#include "bif/bif_5_0_d.h" +#include "bif/bif_5_0_sh_mask.h" + +#include "iceland_sdma_pkt_open.h" + +static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev); +static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev); +static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev); +static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev); + +MODULE_FIRMWARE("radeon/topaz_sdma.bin"); +MODULE_FIRMWARE("radeon/topaz_sdma1.bin"); + +static const u32 sdma_offsets[SDMA_MAX_INSTANCE] = +{ + SDMA0_REGISTER_OFFSET, + SDMA1_REGISTER_OFFSET +}; + +static const u32 golden_settings_iceland_a11[] = +{ + mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007, + mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000, + mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007, + mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000, +}; + +static const u32 iceland_mgcg_cgcg_init[] = +{ + mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100, + mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100 +}; + +/* + * sDMA - System DMA + * Starting with CIK, the GPU has new asynchronous + * DMA engines. These engines are used for compute + * and gfx. There are two DMA engines (SDMA0, SDMA1) + * and each one supports 1 ring buffer used for gfx + * and 2 queues used for compute. + * + * The programming model is very similar to the CP + * (ring buffer, IBs, etc.), but sDMA has it's own + * packet format that is different from the PM4 format + * used by the CP. sDMA supports copying data, writing + * embedded data, solid fills, and a number of other + * things. It also has support for tiling/detiling of + * buffers. + */ + +static void sdma_v2_4_init_golden_registers(struct amdgpu_device *adev) +{ + switch (adev->asic_type) { + case CHIP_TOPAZ: + amdgpu_program_register_sequence(adev, + iceland_mgcg_cgcg_init, + (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init)); + amdgpu_program_register_sequence(adev, + golden_settings_iceland_a11, + (const u32)ARRAY_SIZE(golden_settings_iceland_a11)); + break; + default: + break; + } +} + +/** + * sdma_v2_4_init_microcode - load ucode images from disk + * + * @adev: amdgpu_device pointer + * + * Use the firmware interface to load the ucode images into + * the driver (not loaded into hw). + * Returns 0 on success, error on failure. 
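+ *
+ * As a concrete illustration (Topaz is the only chip handled here),
+ * the per-instance firmware names are built as:
+ *
+ *   snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", "topaz");
+ *   snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma1.bin", "topaz");
+ *
+ * i.e. "radeon/topaz_sdma.bin" for instance 0 and
+ * "radeon/topaz_sdma1.bin" for instance 1, matching the
+ * MODULE_FIRMWARE() declarations above.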
+ */ +static int sdma_v2_4_init_microcode(struct amdgpu_device *adev) +{ + const char *chip_name; + char fw_name[30]; + int err, i; + struct amdgpu_firmware_info *info = NULL; + const struct common_firmware_header *header = NULL; + + DRM_DEBUG("\n"); + + switch (adev->asic_type) { + case CHIP_TOPAZ: + chip_name = "topaz"; + break; + default: BUG(); + } + + for (i = 0; i < SDMA_MAX_INSTANCE; i++) { + if (i == 0) + snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name); + else + snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma1.bin", chip_name); + err = request_firmware(&adev->sdma[i].fw, fw_name, adev->dev); + if (err) + goto out; + err = amdgpu_ucode_validate(adev->sdma[i].fw); + if (err) + goto out; + + if (adev->firmware.smu_load) { + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i]; + info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i; + info->fw = adev->sdma[i].fw; + header = (const struct common_firmware_header *)info->fw->data; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); + } + } + +out: + if (err) { + printk(KERN_ERR + "sdma_v2_4: Failed to load firmware \"%s\"\n", + fw_name); + for (i = 0; i < SDMA_MAX_INSTANCE; i++) { + release_firmware(adev->sdma[i].fw); + adev->sdma[i].fw = NULL; + } + } + return err; +} + +/** + * sdma_v2_4_ring_get_rptr - get the current read pointer + * + * @ring: amdgpu ring pointer + * + * Get the current rptr from the hardware (VI+). + */ +static uint32_t sdma_v2_4_ring_get_rptr(struct amdgpu_ring *ring) +{ + u32 rptr; + + /* XXX check if swapping is necessary on BE */ + rptr = ring->adev->wb.wb[ring->rptr_offs] >> 2; + + return rptr; +} + +/** + * sdma_v2_4_ring_get_wptr - get the current write pointer + * + * @ring: amdgpu ring pointer + * + * Get the current wptr from the hardware (VI+). + */ +static uint32_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1; + u32 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2; + + return wptr; +} + +/** + * sdma_v2_4_ring_set_wptr - commit the write pointer + * + * @ring: amdgpu ring pointer + * + * Write the wptr back to the hardware (VI+). + */ +static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1; + + WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2); +} + +static void sdma_v2_4_hdp_flush_ring_emit(struct amdgpu_ring *); + +/** + * sdma_v2_4_ring_emit_ib - Schedule an IB on the DMA engine + * + * @ring: amdgpu ring pointer + * @ib: IB object to schedule + * + * Schedule an IB in the DMA ring (VI). + */ +static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring, + struct amdgpu_ib *ib) +{ + u32 vmid = (ib->vm ? 
ib->vm->ids[ring->idx].id : 0) & 0xf; + u32 next_rptr = ring->wptr + 5; + + if (ib->flush_hdp_writefifo) + next_rptr += 6; + + while ((next_rptr & 7) != 2) + next_rptr++; + + next_rptr += 6; + + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | + SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR)); + amdgpu_ring_write(ring, lower_32_bits(ring->next_rptr_gpu_addr) & 0xfffffffc); + amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr)); + amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1)); + amdgpu_ring_write(ring, next_rptr); + + if (ib->flush_hdp_writefifo) { + /* flush HDP */ + sdma_v2_4_hdp_flush_ring_emit(ring); + } + + /* IB packet must end on a 8 DW boundary */ + while ((ring->wptr & 7) != 2) + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_NOP)); + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) | + SDMA_PKT_INDIRECT_HEADER_VMID(vmid)); + /* base must be 32 byte aligned */ + amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0); + amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); + amdgpu_ring_write(ring, ib->length_dw); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, 0); + +} + +/** + * sdma_v2_4_hdp_flush_ring_emit - emit an hdp flush on the DMA ring + * + * @ring: amdgpu ring pointer + * + * Emit an hdp flush packet on the requested DMA ring. + */ +static void sdma_v2_4_hdp_flush_ring_emit(struct amdgpu_ring *ring) +{ + u32 ref_and_mask = 0; + + if (ring == &ring->adev->sdma[0].ring) + ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1); + else + ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1); + + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) | + SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) | + SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */ + amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2); + amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2); + amdgpu_ring_write(ring, ref_and_mask); /* reference */ + amdgpu_ring_write(ring, ref_and_mask); /* mask */ + amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | + SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */ +} + +/** + * sdma_v2_4_ring_emit_fence - emit a fence on the DMA ring + * + * @ring: amdgpu ring pointer + * @fence: amdgpu fence object + * + * Add a DMA fence packet to the ring to write + * the fence seq number and DMA trap packet to generate + * an interrupt if needed (VI). + */ +static void sdma_v2_4_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, + bool write64bits) +{ + /* write the fence */ + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE)); + amdgpu_ring_write(ring, lower_32_bits(addr)); + amdgpu_ring_write(ring, upper_32_bits(addr)); + amdgpu_ring_write(ring, lower_32_bits(seq)); + + /* optionally write high bits as well */ + if (write64bits) { + addr += 4; + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE)); + amdgpu_ring_write(ring, lower_32_bits(addr)); + amdgpu_ring_write(ring, upper_32_bits(addr)); + amdgpu_ring_write(ring, upper_32_bits(seq)); + } + + /* generate an interrupt */ + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP)); + amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0)); +} + +/** + * sdma_v2_4_ring_emit_semaphore - emit a semaphore on the dma ring + * + * @ring: amdgpu_ring structure holding ring information + * @semaphore: amdgpu semaphore object + * @emit_wait: wait or signal semaphore + * + * Add a DMA semaphore packet to the ring wait on or signal + * other rings (VI). 
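+ *
+ * The SIGNAL field in the packet header selects the operation
+ * (1 = signal, 0 = wait, hence sig = emit_wait ? 0 : 1 below), and
+ * the semaphore address must be 8-byte aligned, which is why the low
+ * address bits are masked with 0xfffffff8.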
+ */ +static bool sdma_v2_4_ring_emit_semaphore(struct amdgpu_ring *ring, + struct amdgpu_semaphore *semaphore, + bool emit_wait) +{ + u64 addr = semaphore->gpu_addr; + u32 sig = emit_wait ? 0 : 1; + + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SEM) | + SDMA_PKT_SEMAPHORE_HEADER_SIGNAL(sig)); + amdgpu_ring_write(ring, lower_32_bits(addr) & 0xfffffff8); + amdgpu_ring_write(ring, upper_32_bits(addr)); + + return true; +} + +/** + * sdma_v2_4_gfx_stop - stop the gfx async dma engines + * + * @adev: amdgpu_device pointer + * + * Stop the gfx async dma ring buffers (VI). + */ +static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev) +{ + struct amdgpu_ring *sdma0 = &adev->sdma[0].ring; + struct amdgpu_ring *sdma1 = &adev->sdma[1].ring; + u32 rb_cntl, ib_cntl; + int i; + + if ((adev->mman.buffer_funcs_ring == sdma0) || + (adev->mman.buffer_funcs_ring == sdma1)) + amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size); + + for (i = 0; i < SDMA_MAX_INSTANCE; i++) { + rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0); + WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl); + ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]); + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0); + WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); + } + sdma0->ready = false; + sdma1->ready = false; +} + +/** + * sdma_v2_4_rlc_stop - stop the compute async dma engines + * + * @adev: amdgpu_device pointer + * + * Stop the compute async dma queues (VI). + */ +static void sdma_v2_4_rlc_stop(struct amdgpu_device *adev) +{ + /* XXX todo */ +} + +/** + * sdma_v2_4_enable - stop the async dma engines + * + * @adev: amdgpu_device pointer + * @enable: enable/disable the DMA MEs. + * + * Halt or unhalt the async dma engines (VI). + */ +static void sdma_v2_4_enable(struct amdgpu_device *adev, bool enable) +{ + u32 f32_cntl; + int i; + + if (enable == false) { + sdma_v2_4_gfx_stop(adev); + sdma_v2_4_rlc_stop(adev); + } + + for (i = 0; i < SDMA_MAX_INSTANCE; i++) { + f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]); + if (enable) + f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0); + else + f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1); + WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], f32_cntl); + } +} + +/** + * sdma_v2_4_gfx_resume - setup and start the async dma engines + * + * @adev: amdgpu_device pointer + * + * Set up the gfx DMA ring buffers and enable them (VI). + * Returns 0 for success, error for failure. 
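+ *
+ * The ring size is programmed as a log2 dword count; for the 256KB
+ * rings allocated in sdma_v2_4_sw_init() that works out to:
+ *
+ *   rb_bufsz = order_base_2(ring->ring_size / 4);
+ *   (256 * 1024 bytes = 65536 dwords, so RB_SIZE is set to 16)
+ *
+ * The read pointer is also mirrored into the writeback slot programmed
+ * here, which is what sdma_v2_4_ring_get_rptr() reads.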
+ */ +static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring; + u32 rb_cntl, ib_cntl; + u32 rb_bufsz; + u32 wb_offset; + int i, j, r; + + for (i = 0; i < SDMA_MAX_INSTANCE; i++) { + ring = &adev->sdma[i].ring; + wb_offset = (ring->rptr_offs * 4); + + mutex_lock(&adev->srbm_mutex); + for (j = 0; j < 16; j++) { + vi_srbm_select(adev, 0, 0, 0, j); + /* SDMA GFX */ + WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0); + WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0); + } + vi_srbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + + WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0); + + /* Set ring buffer size in dwords */ + rb_bufsz = order_base_2(ring->ring_size / 4); + rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz); +#ifdef __BIG_ENDIAN + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, + RPTR_WRITEBACK_SWAP_ENABLE, 1); +#endif + WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl); + + /* Initialize the ring buffer's read and write pointers */ + WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0); + WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); + + /* set the wb address whether it's enabled or not */ + WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], + upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF); + WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i], + lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC); + + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1); + + WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8); + WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40); + + ring->wptr = 0; + WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2); + + /* enable DMA RB */ + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1); + WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl); + + ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]); + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1); +#ifdef __BIG_ENDIAN + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1); +#endif + /* enable DMA IBs */ + WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); + + ring->ready = true; + + r = amdgpu_ring_test_ring(ring); + if (r) { + ring->ready = false; + return r; + } + + if (adev->mman.buffer_funcs_ring == ring) + amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size); + } + + return 0; +} + +/** + * sdma_v2_4_rlc_resume - setup and start the async dma engines + * + * @adev: amdgpu_device pointer + * + * Set up the compute DMA queues and enable them (VI). + * Returns 0 for success, error for failure. + */ +static int sdma_v2_4_rlc_resume(struct amdgpu_device *adev) +{ + /* XXX todo */ + return 0; +} + +/** + * sdma_v2_4_load_microcode - load the sDMA ME ucode + * + * @adev: amdgpu_device pointer + * + * Loads the sDMA0/1 ucode. + * Returns 0 for success, -EINVAL if the ucode is not available. 
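+ *
+ * The upload itself is a simple register write loop per instance,
+ * performed while the engines are halted, roughly:
+ *
+ *   WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
+ *   for (j = 0; j < fw_size; j++)
+ *           WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
+ *   WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma[i].fw_version);
+ *
+ * Note that this path is only taken when the SMU is not loading the
+ * firmware; otherwise sdma_v2_4_start() relies on the SMU and checks
+ * check_fw_load_finish() instead.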
+ */ +static int sdma_v2_4_load_microcode(struct amdgpu_device *adev) +{ + const struct sdma_firmware_header_v1_0 *hdr; + const __le32 *fw_data; + u32 fw_size; + int i, j; + bool smc_loads_fw = false; /* XXX fix me */ + + if (!adev->sdma[0].fw || !adev->sdma[1].fw) + return -EINVAL; + + /* halt the MEs */ + sdma_v2_4_enable(adev, false); + + if (smc_loads_fw) { + /* XXX query SMC for fw load complete */ + } else { + for (i = 0; i < SDMA_MAX_INSTANCE; i++) { + hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data; + amdgpu_ucode_print_sdma_hdr(&hdr->header); + fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; + adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version); + + fw_data = (const __le32 *) + (adev->sdma[i].fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0); + for (j = 0; j < fw_size; j++) + WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++)); + WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma[i].fw_version); + } + } + + return 0; +} + +/** + * sdma_v2_4_start - setup and start the async dma engines + * + * @adev: amdgpu_device pointer + * + * Set up the DMA engines and enable them (VI). + * Returns 0 for success, error for failure. + */ +static int sdma_v2_4_start(struct amdgpu_device *adev) +{ + int r; + + if (!adev->firmware.smu_load) { + r = sdma_v2_4_load_microcode(adev); + if (r) + return r; + } else { + r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, + AMDGPU_UCODE_ID_SDMA0); + if (r) + return -EINVAL; + r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, + AMDGPU_UCODE_ID_SDMA1); + if (r) + return -EINVAL; + } + + /* unhalt the MEs */ + sdma_v2_4_enable(adev, true); + + /* start the gfx rings and rlc compute queues */ + r = sdma_v2_4_gfx_resume(adev); + if (r) + return r; + r = sdma_v2_4_rlc_resume(adev); + if (r) + return r; + + return 0; +} + +/** + * sdma_v2_4_ring_test_ring - simple async dma engine test + * + * @ring: amdgpu_ring structure holding ring information + * + * Test the DMA engine by writing using it to write an + * value to memory. (VI). + * Returns 0 for success, error for failure. 
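+ *
+ * The test seeds a writeback dword with 0xCAFEDEAD and then queues a
+ * 5-dword "write untiled" packet that should overwrite it with
+ * 0xDEADBEEF:
+ *
+ *   amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
+ *                     SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
+ *   amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
+ *   amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
+ *   amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
+ *   amdgpu_ring_write(ring, 0xDEADBEEF);
+ *
+ * and then polls the writeback slot for up to adev->usec_timeout usecs.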
+ */ +static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + unsigned i; + unsigned index; + int r; + u32 tmp; + u64 gpu_addr; + + r = amdgpu_wb_get(adev, &index); + if (r) { + dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); + return r; + } + + gpu_addr = adev->wb.gpu_addr + (index * 4); + tmp = 0xCAFEDEAD; + adev->wb.wb[index] = cpu_to_le32(tmp); + + r = amdgpu_ring_lock(ring, 5); + if (r) { + DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); + amdgpu_wb_free(adev, index); + return r; + } + + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | + SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR)); + amdgpu_ring_write(ring, lower_32_bits(gpu_addr)); + amdgpu_ring_write(ring, upper_32_bits(gpu_addr)); + amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1)); + amdgpu_ring_write(ring, 0xDEADBEEF); + amdgpu_ring_unlock_commit(ring); + + for (i = 0; i < adev->usec_timeout; i++) { + tmp = le32_to_cpu(adev->wb.wb[index]); + if (tmp == 0xDEADBEEF) + break; + DRM_UDELAY(1); + } + + if (i < adev->usec_timeout) { + DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i); + } else { + DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", + ring->idx, tmp); + r = -EINVAL; + } + amdgpu_wb_free(adev, index); + + return r; +} + +/** + * sdma_v2_4_ring_test_ib - test an IB on the DMA engine + * + * @ring: amdgpu_ring structure holding ring information + * + * Test a simple IB in the DMA ring (VI). + * Returns 0 on success, error on failure. + */ +static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + struct amdgpu_ib ib; + unsigned i; + unsigned index; + int r; + u32 tmp = 0; + u64 gpu_addr; + + r = amdgpu_wb_get(adev, &index); + if (r) { + dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); + return r; + } + + gpu_addr = adev->wb.gpu_addr + (index * 4); + tmp = 0xCAFEDEAD; + adev->wb.wb[index] = cpu_to_le32(tmp); + + r = amdgpu_ib_get(ring, NULL, 256, &ib); + if (r) { + amdgpu_wb_free(adev, index); + DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); + return r; + } + + ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | + SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR); + ib.ptr[1] = lower_32_bits(gpu_addr); + ib.ptr[2] = upper_32_bits(gpu_addr); + ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1); + ib.ptr[4] = 0xDEADBEEF; + ib.ptr[5] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP); + ib.ptr[6] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP); + ib.ptr[7] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP); + ib.length_dw = 8; + + r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED); + if (r) { + amdgpu_ib_free(adev, &ib); + amdgpu_wb_free(adev, index); + DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r); + return r; + } + r = amdgpu_fence_wait(ib.fence, false); + if (r) { + amdgpu_ib_free(adev, &ib); + amdgpu_wb_free(adev, index); + DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); + return r; + } + for (i = 0; i < adev->usec_timeout; i++) { + tmp = le32_to_cpu(adev->wb.wb[index]); + if (tmp == 0xDEADBEEF) + break; + DRM_UDELAY(1); + } + if (i < adev->usec_timeout) { + DRM_INFO("ib test on ring %d succeeded in %u usecs\n", + ib.fence->ring->idx, i); + } else { + DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); + r = -EINVAL; + } + amdgpu_ib_free(adev, &ib); + amdgpu_wb_free(adev, index); + return r; +} + +/** + * sdma_v2_4_vm_copy_pte - update PTEs by copying them from the GART + * + * @ib: indirect buffer to fill with commands + * @pe: addr of the page entry + * 
@src: src addr to copy from + * @count: number of page entries to update + * + * Update PTEs by copying them from the GART using sDMA (CIK). + */ +static void sdma_v2_4_vm_copy_pte(struct amdgpu_ib *ib, + uint64_t pe, uint64_t src, + unsigned count) +{ + while (count) { + unsigned bytes = count * 8; + if (bytes > 0x1FFFF8) + bytes = 0x1FFFF8; + + ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | + SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); + ib->ptr[ib->length_dw++] = bytes; + ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ + ib->ptr[ib->length_dw++] = lower_32_bits(src); + ib->ptr[ib->length_dw++] = upper_32_bits(src); + ib->ptr[ib->length_dw++] = lower_32_bits(pe); + ib->ptr[ib->length_dw++] = upper_32_bits(pe); + + pe += bytes; + src += bytes; + count -= bytes / 8; + } +} + +/** + * sdma_v2_4_vm_write_pte - update PTEs by writing them manually + * + * @ib: indirect buffer to fill with commands + * @pe: addr of the page entry + * @addr: dst addr to write into pe + * @count: number of page entries to update + * @incr: increase next addr by incr bytes + * @flags: access flags + * + * Update PTEs by writing them manually using sDMA (CIK). + */ +static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib, + uint64_t pe, + uint64_t addr, unsigned count, + uint32_t incr, uint32_t flags) +{ + uint64_t value; + unsigned ndw; + + while (count) { + ndw = count * 2; + if (ndw > 0xFFFFE) + ndw = 0xFFFFE; + + /* for non-physically contiguous pages (system) */ + ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | + SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); + ib->ptr[ib->length_dw++] = pe; + ib->ptr[ib->length_dw++] = upper_32_bits(pe); + ib->ptr[ib->length_dw++] = ndw; + for (; ndw > 0; ndw -= 2, --count, pe += 8) { + if (flags & AMDGPU_PTE_SYSTEM) { + value = amdgpu_vm_map_gart(ib->ring->adev, addr); + value &= 0xFFFFFFFFFFFFF000ULL; + } else if (flags & AMDGPU_PTE_VALID) { + value = addr; + } else { + value = 0; + } + addr += incr; + value |= flags; + ib->ptr[ib->length_dw++] = value; + ib->ptr[ib->length_dw++] = upper_32_bits(value); + } + } +} + +/** + * sdma_v2_4_vm_set_pte_pde - update the page tables using sDMA + * + * @ib: indirect buffer to fill with commands + * @pe: addr of the page entry + * @addr: dst addr to write into pe + * @count: number of page entries to update + * @incr: increase next addr by incr bytes + * @flags: access flags + * + * Update the page tables using sDMA (CIK). 
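+ *
+ * Each loop iteration emits one 10-dword SDMA_OP_GEN_PTEPDE packet
+ * covering up to 0x7FFFF entries; the engine generates the entries
+ * itself from a base value and an increment, so the loop only has to
+ * advance its cursors:
+ *
+ *   value  = (flags & AMDGPU_PTE_VALID) ? addr : 0;
+ *   pe    += ndw * 8;      (each entry is 8 bytes)
+ *   addr  += ndw * incr;
+ *   count -= ndw;
+ *
+ * which is why this path is only used for physically contiguous (VRAM)
+ * ranges.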
+ */ +static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib, + uint64_t pe, + uint64_t addr, unsigned count, + uint32_t incr, uint32_t flags) +{ + uint64_t value; + unsigned ndw; + + while (count) { + ndw = count; + if (ndw > 0x7FFFF) + ndw = 0x7FFFF; + + if (flags & AMDGPU_PTE_VALID) + value = addr; + else + value = 0; + + /* for physically contiguous pages (vram) */ + ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE); + ib->ptr[ib->length_dw++] = pe; /* dst addr */ + ib->ptr[ib->length_dw++] = upper_32_bits(pe); + ib->ptr[ib->length_dw++] = flags; /* mask */ + ib->ptr[ib->length_dw++] = 0; + ib->ptr[ib->length_dw++] = value; /* value */ + ib->ptr[ib->length_dw++] = upper_32_bits(value); + ib->ptr[ib->length_dw++] = incr; /* increment size */ + ib->ptr[ib->length_dw++] = 0; + ib->ptr[ib->length_dw++] = ndw; /* number of entries */ + + pe += ndw * 8; + addr += ndw * incr; + count -= ndw; + } +} + +/** + * sdma_v2_4_vm_pad_ib - pad the IB to the required number of dw + * + * @ib: indirect buffer to fill with padding + * + */ +static void sdma_v2_4_vm_pad_ib(struct amdgpu_ib *ib) +{ + while (ib->length_dw & 0x7) + ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP); +} + +/** + * sdma_v2_4_ring_emit_vm_flush - cik vm flush using sDMA + * + * @ring: amdgpu_ring pointer + * @vm: amdgpu_vm pointer + * + * Update the page table base and flush the VM TLB + * using sDMA (VI). + */ +static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring, + unsigned vm_id, uint64_t pd_addr) +{ + u32 srbm_gfx_cntl = 0; + + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | + SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); + if (vm_id < 8) { + amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id)); + } else { + amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8)); + } + amdgpu_ring_write(ring, pd_addr >> 12); + + /* update SH_MEM_* regs */ + srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vm_id); + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | + SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); + amdgpu_ring_write(ring, mmSRBM_GFX_CNTL); + amdgpu_ring_write(ring, srbm_gfx_cntl); + + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | + SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); + amdgpu_ring_write(ring, mmSH_MEM_BASES); + amdgpu_ring_write(ring, 0); + + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | + SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); + amdgpu_ring_write(ring, mmSH_MEM_CONFIG); + amdgpu_ring_write(ring, 0); + + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | + SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); + amdgpu_ring_write(ring, mmSH_MEM_APE1_BASE); + amdgpu_ring_write(ring, 1); + + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | + SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); + amdgpu_ring_write(ring, mmSH_MEM_APE1_LIMIT); + amdgpu_ring_write(ring, 0); + + srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, 0); + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | + SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); + amdgpu_ring_write(ring, mmSRBM_GFX_CNTL); + amdgpu_ring_write(ring, srbm_gfx_cntl); + + + /* flush TLB */ + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | + SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); + amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST); + amdgpu_ring_write(ring, 1 << vm_id); + + /* wait for flush */ + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) | + 
SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) | + SDMA_PKT_POLL_REGMEM_HEADER_FUNC(0)); /* always */ + amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, 0); /* reference */ + amdgpu_ring_write(ring, 0); /* mask */ + amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | + SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */ +} + +static int sdma_v2_4_early_init(struct amdgpu_device *adev) +{ + sdma_v2_4_set_ring_funcs(adev); + sdma_v2_4_set_buffer_funcs(adev); + sdma_v2_4_set_vm_pte_funcs(adev); + sdma_v2_4_set_irq_funcs(adev); + + return 0; +} + +static int sdma_v2_4_sw_init(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring; + int r; + + /* SDMA trap event */ + r = amdgpu_irq_add_id(adev, 224, &adev->sdma_trap_irq); + if (r) + return r; + + /* SDMA Privileged inst */ + r = amdgpu_irq_add_id(adev, 241, &adev->sdma_illegal_inst_irq); + if (r) + return r; + + /* SDMA Privileged inst */ + r = amdgpu_irq_add_id(adev, 247, &adev->sdma_illegal_inst_irq); + if (r) + return r; + + r = sdma_v2_4_init_microcode(adev); + if (r) { + DRM_ERROR("Failed to load sdma firmware!\n"); + return r; + } + + ring = &adev->sdma[0].ring; + ring->ring_obj = NULL; + ring->use_doorbell = false; + + ring = &adev->sdma[1].ring; + ring->ring_obj = NULL; + ring->use_doorbell = false; + + ring = &adev->sdma[0].ring; + sprintf(ring->name, "sdma0"); + r = amdgpu_ring_init(adev, ring, 256 * 1024, + SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf, + &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP0, + AMDGPU_RING_TYPE_SDMA); + if (r) + return r; + + ring = &adev->sdma[1].ring; + sprintf(ring->name, "sdma1"); + r = amdgpu_ring_init(adev, ring, 256 * 1024, + SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf, + &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP1, + AMDGPU_RING_TYPE_SDMA); + if (r) + return r; + + return r; +} + +static int sdma_v2_4_sw_fini(struct amdgpu_device *adev) +{ + amdgpu_ring_fini(&adev->sdma[0].ring); + amdgpu_ring_fini(&adev->sdma[1].ring); + + return 0; +} + +static int sdma_v2_4_hw_init(struct amdgpu_device *adev) +{ + int r; + + sdma_v2_4_init_golden_registers(adev); + + r = sdma_v2_4_start(adev); + if (r) + return r; + + return r; +} + +static int sdma_v2_4_hw_fini(struct amdgpu_device *adev) +{ + sdma_v2_4_enable(adev, false); + + return 0; +} + +static int sdma_v2_4_suspend(struct amdgpu_device *adev) +{ + + return sdma_v2_4_hw_fini(adev); +} + +static int sdma_v2_4_resume(struct amdgpu_device *adev) +{ + + return sdma_v2_4_hw_init(adev); +} + +static bool sdma_v2_4_is_idle(struct amdgpu_device *adev) +{ + u32 tmp = RREG32(mmSRBM_STATUS2); + + if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK | + SRBM_STATUS2__SDMA1_BUSY_MASK)) + return false; + + return true; +} + +static int sdma_v2_4_wait_for_idle(struct amdgpu_device *adev) +{ + unsigned i; + u32 tmp; + + for (i = 0; i < adev->usec_timeout; i++) { + tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK | + SRBM_STATUS2__SDMA1_BUSY_MASK); + + if (!tmp) + return 0; + udelay(1); + } + return -ETIMEDOUT; +} + +static void sdma_v2_4_print_status(struct amdgpu_device *adev) +{ + int i, j; + + dev_info(adev->dev, "VI SDMA registers\n"); + dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", + RREG32(mmSRBM_STATUS2)); + for (i = 0; i < SDMA_MAX_INSTANCE; i++) { + dev_info(adev->dev, " SDMA%d_STATUS_REG=0x%08X\n", + i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i])); + dev_info(adev->dev, " SDMA%d_F32_CNTL=0x%08X\n", + i, RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i])); + dev_info(adev->dev, " 
SDMA%d_CNTL=0x%08X\n", + i, RREG32(mmSDMA0_CNTL + sdma_offsets[i])); + dev_info(adev->dev, " SDMA%d_SEM_WAIT_FAIL_TIMER_CNTL=0x%08X\n", + i, RREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i])); + dev_info(adev->dev, " SDMA%d_GFX_IB_CNTL=0x%08X\n", + i, RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i])); + dev_info(adev->dev, " SDMA%d_GFX_RB_CNTL=0x%08X\n", + i, RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i])); + dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR=0x%08X\n", + i, RREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i])); + dev_info(adev->dev, " SDMA%d_GFX_RB_WPTR=0x%08X\n", + i, RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i])); + dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR_ADDR_HI=0x%08X\n", + i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i])); + dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR_ADDR_LO=0x%08X\n", + i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i])); + dev_info(adev->dev, " SDMA%d_GFX_RB_BASE=0x%08X\n", + i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i])); + dev_info(adev->dev, " SDMA%d_GFX_RB_BASE_HI=0x%08X\n", + i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i])); + mutex_lock(&adev->srbm_mutex); + for (j = 0; j < 16; j++) { + vi_srbm_select(adev, 0, 0, 0, j); + dev_info(adev->dev, " VM %d:\n", j); + dev_info(adev->dev, " SDMA%d_GFX_VIRTUAL_ADDR=0x%08X\n", + i, RREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i])); + dev_info(adev->dev, " SDMA%d_GFX_APE1_CNTL=0x%08X\n", + i, RREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i])); + } + vi_srbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + } +} + +static int sdma_v2_4_soft_reset(struct amdgpu_device *adev) +{ + u32 srbm_soft_reset = 0; + u32 tmp = RREG32(mmSRBM_STATUS2); + + if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) { + /* sdma0 */ + tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET); + tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0); + WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp); + srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK; + } + if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) { + /* sdma1 */ + tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET); + tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0); + WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp); + srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK; + } + + if (srbm_soft_reset) { + sdma_v2_4_print_status(adev); + + tmp = RREG32(mmSRBM_SOFT_RESET); + tmp |= srbm_soft_reset; + dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + udelay(50); + + tmp &= ~srbm_soft_reset; + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + /* Wait a little for things to settle down */ + udelay(50); + + sdma_v2_4_print_status(adev); + } + + return 0; +} + +static int sdma_v2_4_set_trap_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned type, + enum amdgpu_interrupt_state state) +{ + u32 sdma_cntl; + + switch (type) { + case AMDGPU_SDMA_IRQ_TRAP0: + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET); + sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0); + WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl); + break; + case AMDGPU_IRQ_STATE_ENABLE: + sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET); + sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1); + WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl); + break; + default: + break; + } + break; + case AMDGPU_SDMA_IRQ_TRAP1: + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + 
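+ /* same as the TRAP0 case above, but using SDMA1's register offset */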
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET); + sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0); + WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl); + break; + case AMDGPU_IRQ_STATE_ENABLE: + sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET); + sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1); + WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl); + break; + default: + break; + } + break; + default: + break; + } + return 0; +} + +static int sdma_v2_4_process_trap_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + u8 instance_id, queue_id; + + instance_id = (entry->ring_id & 0x3) >> 0; + queue_id = (entry->ring_id & 0xc) >> 2; + DRM_DEBUG("IH: SDMA trap\n"); + switch (instance_id) { + case 0: + switch (queue_id) { + case 0: + amdgpu_fence_process(&adev->sdma[0].ring); + break; + case 1: + /* XXX compute */ + break; + case 2: + /* XXX compute */ + break; + } + break; + case 1: + switch (queue_id) { + case 0: + amdgpu_fence_process(&adev->sdma[1].ring); + break; + case 1: + /* XXX compute */ + break; + case 2: + /* XXX compute */ + break; + } + break; + } + return 0; +} + +static int sdma_v2_4_process_illegal_inst_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + DRM_ERROR("Illegal instruction in SDMA command stream\n"); + schedule_work(&adev->reset_work); + return 0; +} + +static int sdma_v2_4_set_clockgating_state(struct amdgpu_device *adev, + enum amdgpu_clockgating_state state) +{ + /* XXX handled via the smc on VI */ + + return 0; +} + +static int sdma_v2_4_set_powergating_state(struct amdgpu_device *adev, + enum amdgpu_powergating_state state) +{ + return 0; +} + +const struct amdgpu_ip_funcs sdma_v2_4_ip_funcs = { + .early_init = sdma_v2_4_early_init, + .late_init = NULL, + .sw_init = sdma_v2_4_sw_init, + .sw_fini = sdma_v2_4_sw_fini, + .hw_init = sdma_v2_4_hw_init, + .hw_fini = sdma_v2_4_hw_fini, + .suspend = sdma_v2_4_suspend, + .resume = sdma_v2_4_resume, + .is_idle = sdma_v2_4_is_idle, + .wait_for_idle = sdma_v2_4_wait_for_idle, + .soft_reset = sdma_v2_4_soft_reset, + .print_status = sdma_v2_4_print_status, + .set_clockgating_state = sdma_v2_4_set_clockgating_state, + .set_powergating_state = sdma_v2_4_set_powergating_state, +}; + +/** + * sdma_v2_4_ring_is_lockup - Check if the DMA engine is locked up + * + * @ring: amdgpu_ring structure holding ring information + * + * Check if the async DMA engine is locked up (VI). + * Returns true if the engine appears to be locked up, false if not. 
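+ *
+ * If the engine is idle the per-ring lockup tracking is simply
+ * refreshed via amdgpu_ring_lockup_update() and false is returned;
+ * only a busy engine is handed to amdgpu_ring_test_lockup() to decide
+ * whether the ring has stopped making progress.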
+ */ +static bool sdma_v2_4_ring_is_lockup(struct amdgpu_ring *ring) +{ + + if (sdma_v2_4_is_idle(ring->adev)) { + amdgpu_ring_lockup_update(ring); + return false; + } + return amdgpu_ring_test_lockup(ring); +} + +static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = { + .get_rptr = sdma_v2_4_ring_get_rptr, + .get_wptr = sdma_v2_4_ring_get_wptr, + .set_wptr = sdma_v2_4_ring_set_wptr, + .parse_cs = NULL, + .emit_ib = sdma_v2_4_ring_emit_ib, + .emit_fence = sdma_v2_4_ring_emit_fence, + .emit_semaphore = sdma_v2_4_ring_emit_semaphore, + .emit_vm_flush = sdma_v2_4_ring_emit_vm_flush, + .test_ring = sdma_v2_4_ring_test_ring, + .test_ib = sdma_v2_4_ring_test_ib, + .is_lockup = sdma_v2_4_ring_is_lockup, +}; + +static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev) +{ + adev->sdma[0].ring.funcs = &sdma_v2_4_ring_funcs; + adev->sdma[1].ring.funcs = &sdma_v2_4_ring_funcs; +} + +static const struct amdgpu_irq_src_funcs sdma_v2_4_trap_irq_funcs = { + .set = sdma_v2_4_set_trap_irq_state, + .process = sdma_v2_4_process_trap_irq, +}; + +static const struct amdgpu_irq_src_funcs sdma_v2_4_illegal_inst_irq_funcs = { + .process = sdma_v2_4_process_illegal_inst_irq, +}; + +static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->sdma_trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST; + adev->sdma_trap_irq.funcs = &sdma_v2_4_trap_irq_funcs; + adev->sdma_illegal_inst_irq.funcs = &sdma_v2_4_illegal_inst_irq_funcs; +} + +/** + * sdma_v2_4_emit_copy_buffer - copy buffer using the sDMA engine + * + * @ring: amdgpu_ring structure holding ring information + * @src_offset: src GPU address + * @dst_offset: dst GPU address + * @byte_count: number of bytes to xfer + * + * Copy GPU buffers using the DMA engine (VI). + * Used by the amdgpu ttm implementation to move pages if + * registered as the asic copy callback. + */ +static void sdma_v2_4_emit_copy_buffer(struct amdgpu_ring *ring, + uint64_t src_offset, + uint64_t dst_offset, + uint32_t byte_count) +{ + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | + SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR)); + amdgpu_ring_write(ring, byte_count); + amdgpu_ring_write(ring, 0); /* src/dst endian swap */ + amdgpu_ring_write(ring, lower_32_bits(src_offset)); + amdgpu_ring_write(ring, upper_32_bits(src_offset)); + amdgpu_ring_write(ring, lower_32_bits(dst_offset)); + amdgpu_ring_write(ring, upper_32_bits(dst_offset)); +} + +/** + * sdma_v2_4_emit_fill_buffer - fill buffer using the sDMA engine + * + * @ring: amdgpu_ring structure holding ring information + * @src_data: value to write to buffer + * @dst_offset: dst GPU address + * @byte_count: number of bytes to xfer + * + * Fill GPU buffers using the DMA engine (VI). 
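+ *
+ * The fill is a single SDMA_OP_CONST_FILL packet: header, destination
+ * address (low/high), the 32-bit fill value, then the byte count:
+ *
+ *   amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL));
+ *   amdgpu_ring_write(ring, lower_32_bits(dst_offset));
+ *   amdgpu_ring_write(ring, upper_32_bits(dst_offset));
+ *   amdgpu_ring_write(ring, src_data);
+ *   amdgpu_ring_write(ring, byte_count);
+ *
+ * Callers are expected to keep byte_count within the fill_max_bytes
+ * (0x1fffff) advertised in sdma_v2_4_buffer_funcs below.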
+ */ +static void sdma_v2_4_emit_fill_buffer(struct amdgpu_ring *ring, + uint32_t src_data, + uint64_t dst_offset, + uint32_t byte_count) +{ + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL)); + amdgpu_ring_write(ring, lower_32_bits(dst_offset)); + amdgpu_ring_write(ring, upper_32_bits(dst_offset)); + amdgpu_ring_write(ring, src_data); + amdgpu_ring_write(ring, byte_count); +} + +static const struct amdgpu_buffer_funcs sdma_v2_4_buffer_funcs = { + .copy_max_bytes = 0x1fffff, + .copy_num_dw = 7, + .emit_copy_buffer = sdma_v2_4_emit_copy_buffer, + + .fill_max_bytes = 0x1fffff, + .fill_num_dw = 7, + .emit_fill_buffer = sdma_v2_4_emit_fill_buffer, +}; + +static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev) +{ + if (adev->mman.buffer_funcs == NULL) { + adev->mman.buffer_funcs = &sdma_v2_4_buffer_funcs; + adev->mman.buffer_funcs_ring = &adev->sdma[0].ring; + } +} + +static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = { + .copy_pte = sdma_v2_4_vm_copy_pte, + .write_pte = sdma_v2_4_vm_write_pte, + .set_pte_pde = sdma_v2_4_vm_set_pte_pde, + .pad_ib = sdma_v2_4_vm_pad_ib, +}; + +static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev) +{ + if (adev->vm_manager.vm_pte_funcs == NULL) { + adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs; + adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring; + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h new file mode 100644 index 000000000000..6cdf8941c577 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h @@ -0,0 +1,29 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __SDMA_V2_4_H__ +#define __SDMA_V2_4_H__ + +extern const struct amdgpu_ip_funcs sdma_v2_4_ip_funcs; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c new file mode 100644 index 000000000000..dd547c7f6cbc --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -0,0 +1,1514 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Alex Deucher + */ +#include +#include +#include "amdgpu.h" +#include "amdgpu_ucode.h" +#include "amdgpu_trace.h" +#include "vi.h" +#include "vid.h" + +#include "oss/oss_3_0_d.h" +#include "oss/oss_3_0_sh_mask.h" + +#include "gmc/gmc_8_1_d.h" +#include "gmc/gmc_8_1_sh_mask.h" + +#include "gca/gfx_8_0_d.h" +#include "gca/gfx_8_0_sh_mask.h" + +#include "bif/bif_5_0_d.h" +#include "bif/bif_5_0_sh_mask.h" + +#include "tonga_sdma_pkt_open.h" + +static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev); +static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev); +static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev); +static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev); + +MODULE_FIRMWARE("radeon/tonga_sdma.bin"); +MODULE_FIRMWARE("radeon/tonga_sdma1.bin"); +MODULE_FIRMWARE("radeon/carrizo_sdma.bin"); +MODULE_FIRMWARE("radeon/carrizo_sdma1.bin"); + +static const u32 sdma_offsets[SDMA_MAX_INSTANCE] = +{ + SDMA0_REGISTER_OFFSET, + SDMA1_REGISTER_OFFSET +}; + +static const u32 golden_settings_tonga_a11[] = +{ + mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007, + mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000, + mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100, + mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100, + mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100, + mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007, + mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000, + mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100, + mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100, + mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100, +}; + +static const u32 tonga_mgcg_cgcg_init[] = +{ + mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100, + mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100 +}; + +static const u32 cz_golden_settings_a11[] = +{ + mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007, + mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000, + mmSDMA0_GFX_IB_CNTL, 0x00000100, 0x00000100, + mmSDMA0_POWER_CNTL, 0x00000800, 0x0003c800, + mmSDMA0_RLC0_IB_CNTL, 0x00000100, 0x00000100, + mmSDMA0_RLC1_IB_CNTL, 0x00000100, 0x00000100, + mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007, + mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000, + mmSDMA1_GFX_IB_CNTL, 0x00000100, 0x00000100, + mmSDMA1_POWER_CNTL, 0x00000800, 0x0003c800, + mmSDMA1_RLC0_IB_CNTL, 0x00000100, 0x00000100, + mmSDMA1_RLC1_IB_CNTL, 0x00000100, 0x00000100, +}; + +static const u32 cz_mgcg_cgcg_init[] = +{ + mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100, + mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100 +}; + +/* + * sDMA - 
System DMA + * Starting with CIK, the GPU has new asynchronous + * DMA engines. These engines are used for compute + * and gfx. There are two DMA engines (SDMA0, SDMA1) + * and each one supports 1 ring buffer used for gfx + * and 2 queues used for compute. + * + * The programming model is very similar to the CP + * (ring buffer, IBs, etc.), but sDMA has it's own + * packet format that is different from the PM4 format + * used by the CP. sDMA supports copying data, writing + * embedded data, solid fills, and a number of other + * things. It also has support for tiling/detiling of + * buffers. + */ + +static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev) +{ + switch (adev->asic_type) { + case CHIP_TONGA: + amdgpu_program_register_sequence(adev, + tonga_mgcg_cgcg_init, + (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init)); + amdgpu_program_register_sequence(adev, + golden_settings_tonga_a11, + (const u32)ARRAY_SIZE(golden_settings_tonga_a11)); + break; + case CHIP_CARRIZO: + amdgpu_program_register_sequence(adev, + cz_mgcg_cgcg_init, + (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init)); + amdgpu_program_register_sequence(adev, + cz_golden_settings_a11, + (const u32)ARRAY_SIZE(cz_golden_settings_a11)); + break; + default: + break; + } +} + +/** + * sdma_v3_0_init_microcode - load ucode images from disk + * + * @adev: amdgpu_device pointer + * + * Use the firmware interface to load the ucode images into + * the driver (not loaded into hw). + * Returns 0 on success, error on failure. + */ +static int sdma_v3_0_init_microcode(struct amdgpu_device *adev) +{ + const char *chip_name; + char fw_name[30]; + int err, i; + struct amdgpu_firmware_info *info = NULL; + const struct common_firmware_header *header = NULL; + + DRM_DEBUG("\n"); + + switch (adev->asic_type) { + case CHIP_TONGA: + chip_name = "tonga"; + break; + case CHIP_CARRIZO: + chip_name = "carrizo"; + break; + default: BUG(); + } + + for (i = 0; i < SDMA_MAX_INSTANCE; i++) { + if (i == 0) + snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name); + else + snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma1.bin", chip_name); + err = request_firmware(&adev->sdma[i].fw, fw_name, adev->dev); + if (err) + goto out; + err = amdgpu_ucode_validate(adev->sdma[i].fw); + if (err) + goto out; + + if (adev->firmware.smu_load) { + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i]; + info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i; + info->fw = adev->sdma[i].fw; + header = (const struct common_firmware_header *)info->fw->data; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); + } + } +out: + if (err) { + printk(KERN_ERR + "sdma_v3_0: Failed to load firmware \"%s\"\n", + fw_name); + for (i = 0; i < SDMA_MAX_INSTANCE; i++) { + release_firmware(adev->sdma[i].fw); + adev->sdma[i].fw = NULL; + } + } + return err; +} + +/** + * sdma_v3_0_ring_get_rptr - get the current read pointer + * + * @ring: amdgpu ring pointer + * + * Get the current rptr from the hardware (VI+). + */ +static uint32_t sdma_v3_0_ring_get_rptr(struct amdgpu_ring *ring) +{ + u32 rptr; + + /* XXX check if swapping is necessary on BE */ + rptr = ring->adev->wb.wb[ring->rptr_offs] >> 2; + + return rptr; +} + +/** + * sdma_v3_0_ring_get_wptr - get the current write pointer + * + * @ring: amdgpu ring pointer + * + * Get the current wptr from the hardware (VI+). 
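+ *
+ * Unlike SDMA v2.4, these rings may use a doorbell; in that case the
+ * write pointer is mirrored into a writeback slot instead of being
+ * read back over MMIO:
+ *
+ *   if (ring->use_doorbell)
+ *           wptr = ring->adev->wb.wb[ring->wptr_offs] >> 2;
+ *   else
+ *           wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;
+ *
+ * Both paths shift right by 2 because the pointer is kept as a byte
+ * offset (sdma_v3_0_ring_set_wptr() shifts left by 2 when committing).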
+ */ +static uint32_t sdma_v3_0_ring_get_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + u32 wptr; + + if (ring->use_doorbell) { + /* XXX check if swapping is necessary on BE */ + wptr = ring->adev->wb.wb[ring->wptr_offs] >> 2; + } else { + int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1; + + wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2; + } + + return wptr; +} + +/** + * sdma_v3_0_ring_set_wptr - commit the write pointer + * + * @ring: amdgpu ring pointer + * + * Write the wptr back to the hardware (VI+). + */ +static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring->use_doorbell) { + /* XXX check if swapping is necessary on BE */ + adev->wb.wb[ring->wptr_offs] = ring->wptr << 2; + WDOORBELL32(ring->doorbell_index, ring->wptr << 2); + } else { + int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1; + + WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2); + } +} + +static void sdma_v3_0_hdp_flush_ring_emit(struct amdgpu_ring *); + +/** + * sdma_v3_0_ring_emit_ib - Schedule an IB on the DMA engine + * + * @ring: amdgpu ring pointer + * @ib: IB object to schedule + * + * Schedule an IB in the DMA ring (VI). + */ +static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring, + struct amdgpu_ib *ib) +{ + u32 vmid = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf; + u32 next_rptr = ring->wptr + 5; + + if (ib->flush_hdp_writefifo) + next_rptr += 6; + + while ((next_rptr & 7) != 2) + next_rptr++; + next_rptr += 6; + + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | + SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR)); + amdgpu_ring_write(ring, lower_32_bits(ring->next_rptr_gpu_addr) & 0xfffffffc); + amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr)); + amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1)); + amdgpu_ring_write(ring, next_rptr); + + /* flush HDP */ + if (ib->flush_hdp_writefifo) { + sdma_v3_0_hdp_flush_ring_emit(ring); + } + + /* IB packet must end on a 8 DW boundary */ + while ((ring->wptr & 7) != 2) + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_NOP)); + + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) | + SDMA_PKT_INDIRECT_HEADER_VMID(vmid)); + /* base must be 32 byte aligned */ + amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0); + amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); + amdgpu_ring_write(ring, ib->length_dw); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, 0); + +} + +/** + * sdma_v3_0_hdp_flush_ring_emit - emit an hdp flush on the DMA ring + * + * @ring: amdgpu ring pointer + * + * Emit an hdp flush packet on the requested DMA ring. 
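+ * The flush is expressed as a POLL_REGMEM packet referencing
+ * GPU_HDP_FLUSH_REQ and GPU_HDP_FLUSH_DONE, with the per-engine bit
+ * (SDMA0 or SDMA1) used as both the reference value and the mask.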
+ */ +static void sdma_v3_0_hdp_flush_ring_emit(struct amdgpu_ring *ring) +{ + u32 ref_and_mask = 0; + + if (ring == &ring->adev->sdma[0].ring) + ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1); + else + ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1); + + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) | + SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) | + SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */ + amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2); + amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2); + amdgpu_ring_write(ring, ref_and_mask); /* reference */ + amdgpu_ring_write(ring, ref_and_mask); /* mask */ + amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | + SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */ +} + +/** + * sdma_v3_0_ring_emit_fence - emit a fence on the DMA ring + * + * @ring: amdgpu ring pointer + * @fence: amdgpu fence object + * + * Add a DMA fence packet to the ring to write + * the fence seq number and DMA trap packet to generate + * an interrupt if needed (VI). + */ +static void sdma_v3_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, + bool write64bits) +{ + /* write the fence */ + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE)); + amdgpu_ring_write(ring, lower_32_bits(addr)); + amdgpu_ring_write(ring, upper_32_bits(addr)); + amdgpu_ring_write(ring, lower_32_bits(seq)); + + /* optionally write high bits as well */ + if (write64bits) { + addr += 4; + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE)); + amdgpu_ring_write(ring, lower_32_bits(addr)); + amdgpu_ring_write(ring, upper_32_bits(addr)); + amdgpu_ring_write(ring, upper_32_bits(seq)); + } + + /* generate an interrupt */ + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP)); + amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0)); +} + + +/** + * sdma_v3_0_ring_emit_semaphore - emit a semaphore on the dma ring + * + * @ring: amdgpu_ring structure holding ring information + * @semaphore: amdgpu semaphore object + * @emit_wait: wait or signal semaphore + * + * Add a DMA semaphore packet to the ring wait on or signal + * other rings (VI). + */ +static bool sdma_v3_0_ring_emit_semaphore(struct amdgpu_ring *ring, + struct amdgpu_semaphore *semaphore, + bool emit_wait) +{ + u64 addr = semaphore->gpu_addr; + u32 sig = emit_wait ? 0 : 1; + + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SEM) | + SDMA_PKT_SEMAPHORE_HEADER_SIGNAL(sig)); + amdgpu_ring_write(ring, lower_32_bits(addr) & 0xfffffff8); + amdgpu_ring_write(ring, upper_32_bits(addr)); + + return true; +} + +/** + * sdma_v3_0_gfx_stop - stop the gfx async dma engines + * + * @adev: amdgpu_device pointer + * + * Stop the gfx async dma ring buffers (VI). 
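+ * Clears the RB_ENABLE and IB_ENABLE bits for both instances and marks
+ * the rings as not ready.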
+ */ +static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev) +{ + struct amdgpu_ring *sdma0 = &adev->sdma[0].ring; + struct amdgpu_ring *sdma1 = &adev->sdma[1].ring; + u32 rb_cntl, ib_cntl; + int i; + + if ((adev->mman.buffer_funcs_ring == sdma0) || + (adev->mman.buffer_funcs_ring == sdma1)) + amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size); + + for (i = 0; i < SDMA_MAX_INSTANCE; i++) { + rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0); + WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl); + ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]); + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0); + WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); + } + sdma0->ready = false; + sdma1->ready = false; +} + +/** + * sdma_v3_0_rlc_stop - stop the compute async dma engines + * + * @adev: amdgpu_device pointer + * + * Stop the compute async dma queues (VI). + */ +static void sdma_v3_0_rlc_stop(struct amdgpu_device *adev) +{ + /* XXX todo */ +} + +/** + * sdma_v3_0_enable - stop the async dma engines + * + * @adev: amdgpu_device pointer + * @enable: enable/disable the DMA MEs. + * + * Halt or unhalt the async dma engines (VI). + */ +static void sdma_v3_0_enable(struct amdgpu_device *adev, bool enable) +{ + u32 f32_cntl; + int i; + + if (enable == false) { + sdma_v3_0_gfx_stop(adev); + sdma_v3_0_rlc_stop(adev); + } + + for (i = 0; i < SDMA_MAX_INSTANCE; i++) { + f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]); + if (enable) + f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0); + else + f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1); + WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], f32_cntl); + } +} + +/** + * sdma_v3_0_gfx_resume - setup and start the async dma engines + * + * @adev: amdgpu_device pointer + * + * Set up the gfx DMA ring buffers and enable them (VI). + * Returns 0 for success, error for failure. 
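+ * For each instance this programs the ring size, rptr write-back address,
+ * ring base and doorbell, enables the RB and IBs, and then runs a ring
+ * test; the ring is marked not ready again if the test fails.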
+ */ +static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring; + u32 rb_cntl, ib_cntl; + u32 rb_bufsz; + u32 wb_offset; + u32 doorbell; + int i, j, r; + + for (i = 0; i < SDMA_MAX_INSTANCE; i++) { + ring = &adev->sdma[i].ring; + wb_offset = (ring->rptr_offs * 4); + + mutex_lock(&adev->srbm_mutex); + for (j = 0; j < 16; j++) { + vi_srbm_select(adev, 0, 0, 0, j); + /* SDMA GFX */ + WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0); + WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0); + } + vi_srbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + + WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0); + + /* Set ring buffer size in dwords */ + rb_bufsz = order_base_2(ring->ring_size / 4); + rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz); +#ifdef __BIG_ENDIAN + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, + RPTR_WRITEBACK_SWAP_ENABLE, 1); +#endif + WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl); + + /* Initialize the ring buffer's read and write pointers */ + WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0); + WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); + + /* set the wb address whether it's enabled or not */ + WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], + upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF); + WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i], + lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC); + + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1); + + WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8); + WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40); + + ring->wptr = 0; + WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2); + + doorbell = RREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i]); + + if (ring->use_doorbell) { + doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, + OFFSET, ring->doorbell_index); + doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1); + } else { + doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0); + } + WREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i], doorbell); + + /* enable DMA RB */ + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1); + WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl); + + ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]); + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1); +#ifdef __BIG_ENDIAN + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1); +#endif + /* enable DMA IBs */ + WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); + + ring->ready = true; + + r = amdgpu_ring_test_ring(ring); + if (r) { + ring->ready = false; + return r; + } + + if (adev->mman.buffer_funcs_ring == ring) + amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size); + } + + return 0; +} + +/** + * sdma_v3_0_rlc_resume - setup and start the async dma engines + * + * @adev: amdgpu_device pointer + * + * Set up the compute DMA queues and enable them (VI). + * Returns 0 for success, error for failure. + */ +static int sdma_v3_0_rlc_resume(struct amdgpu_device *adev) +{ + /* XXX todo */ + return 0; +} + +/** + * sdma_v3_0_load_microcode - load the sDMA ME ucode + * + * @adev: amdgpu_device pointer + * + * Loads the sDMA0/1 ucode. + * Returns 0 for success, -EINVAL if the ucode is not available. 
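+ * The engines are halted first; each image is then written one dword at
+ * a time through SDMA0_UCODE_DATA after resetting SDMA0_UCODE_ADDR.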
+ */
+static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
+{
+ const struct sdma_firmware_header_v1_0 *hdr;
+ const __le32 *fw_data;
+ u32 fw_size;
+ int i, j;
+
+ if (!adev->sdma[0].fw || !adev->sdma[1].fw)
+ return -EINVAL;
+
+ /* halt the MEs */
+ sdma_v3_0_enable(adev, false);
+
+ for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+ hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
+ amdgpu_ucode_print_sdma_hdr(&hdr->header);
+ fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+ adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
+
+ fw_data = (const __le32 *)
+ (adev->sdma[i].fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
+ for (j = 0; j < fw_size; j++)
+ WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
+ WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma[i].fw_version);
+ }
+
+ return 0;
+}
+
+/**
+ * sdma_v3_0_start - setup and start the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set up the DMA engines and enable them (VI).
+ * Returns 0 for success, error for failure.
+ */
+static int sdma_v3_0_start(struct amdgpu_device *adev)
+{
+ int r;
+
+ if (!adev->firmware.smu_load) {
+ r = sdma_v3_0_load_microcode(adev);
+ if (r)
+ return r;
+ } else {
+ r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
+ AMDGPU_UCODE_ID_SDMA0);
+ if (r)
+ return -EINVAL;
+ r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
+ AMDGPU_UCODE_ID_SDMA1);
+ if (r)
+ return -EINVAL;
+ }
+
+ /* unhalt the MEs */
+ sdma_v3_0_enable(adev, true);
+
+ /* start the gfx rings and rlc compute queues */
+ r = sdma_v3_0_gfx_resume(adev);
+ if (r)
+ return r;
+ r = sdma_v3_0_rlc_resume(adev);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+/**
+ * sdma_v3_0_ring_test_ring - simple async dma engine test
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Test the DMA engine by using it to write a value
+ * to memory (VI).
+ * Returns 0 for success, error for failure.
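+ * A scratch write-back slot is seeded with 0xCAFEDEAD, a single
+ * SDMA_OP_WRITE/SDMA_SUBOP_WRITE_LINEAR packet writes 0xDEADBEEF to it,
+ * and the slot is polled until the new value appears or the timeout expires.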
+ */ +static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + unsigned i; + unsigned index; + int r; + u32 tmp; + u64 gpu_addr; + + r = amdgpu_wb_get(adev, &index); + if (r) { + dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); + return r; + } + + gpu_addr = adev->wb.gpu_addr + (index * 4); + tmp = 0xCAFEDEAD; + adev->wb.wb[index] = cpu_to_le32(tmp); + + r = amdgpu_ring_lock(ring, 5); + if (r) { + DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); + amdgpu_wb_free(adev, index); + return r; + } + + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | + SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR)); + amdgpu_ring_write(ring, lower_32_bits(gpu_addr)); + amdgpu_ring_write(ring, upper_32_bits(gpu_addr)); + amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1)); + amdgpu_ring_write(ring, 0xDEADBEEF); + amdgpu_ring_unlock_commit(ring); + + for (i = 0; i < adev->usec_timeout; i++) { + tmp = le32_to_cpu(adev->wb.wb[index]); + if (tmp == 0xDEADBEEF) + break; + DRM_UDELAY(1); + } + + if (i < adev->usec_timeout) { + DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i); + } else { + DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", + ring->idx, tmp); + r = -EINVAL; + } + amdgpu_wb_free(adev, index); + + return r; +} + +/** + * sdma_v3_0_ring_test_ib - test an IB on the DMA engine + * + * @ring: amdgpu_ring structure holding ring information + * + * Test a simple IB in the DMA ring (VI). + * Returns 0 on success, error on failure. + */ +static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + struct amdgpu_ib ib; + unsigned i; + unsigned index; + int r; + u32 tmp = 0; + u64 gpu_addr; + + r = amdgpu_wb_get(adev, &index); + if (r) { + dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); + return r; + } + + gpu_addr = adev->wb.gpu_addr + (index * 4); + tmp = 0xCAFEDEAD; + adev->wb.wb[index] = cpu_to_le32(tmp); + + r = amdgpu_ib_get(ring, NULL, 256, &ib); + if (r) { + amdgpu_wb_free(adev, index); + DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); + return r; + } + + ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | + SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR); + ib.ptr[1] = lower_32_bits(gpu_addr); + ib.ptr[2] = upper_32_bits(gpu_addr); + ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1); + ib.ptr[4] = 0xDEADBEEF; + ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP); + ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP); + ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP); + ib.length_dw = 8; + + r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED); + if (r) { + amdgpu_ib_free(adev, &ib); + amdgpu_wb_free(adev, index); + DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r); + return r; + } + r = amdgpu_fence_wait(ib.fence, false); + if (r) { + amdgpu_ib_free(adev, &ib); + amdgpu_wb_free(adev, index); + DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); + return r; + } + for (i = 0; i < adev->usec_timeout; i++) { + tmp = le32_to_cpu(adev->wb.wb[index]); + if (tmp == 0xDEADBEEF) + break; + DRM_UDELAY(1); + } + if (i < adev->usec_timeout) { + DRM_INFO("ib test on ring %d succeeded in %u usecs\n", + ib.fence->ring->idx, i); + } else { + DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); + r = -EINVAL; + } + amdgpu_ib_free(adev, &ib); + amdgpu_wb_free(adev, index); + return r; +} + +/** + * sdma_v3_0_vm_copy_pte - update PTEs by copying them from the GART + * + * @ib: indirect buffer to fill with commands + * @pe: addr of the page 
entry + * @src: src addr to copy from + * @count: number of page entries to update + * + * Update PTEs by copying them from the GART using sDMA (CIK). + */ +static void sdma_v3_0_vm_copy_pte(struct amdgpu_ib *ib, + uint64_t pe, uint64_t src, + unsigned count) +{ + while (count) { + unsigned bytes = count * 8; + if (bytes > 0x1FFFF8) + bytes = 0x1FFFF8; + + ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | + SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); + ib->ptr[ib->length_dw++] = bytes; + ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ + ib->ptr[ib->length_dw++] = lower_32_bits(src); + ib->ptr[ib->length_dw++] = upper_32_bits(src); + ib->ptr[ib->length_dw++] = lower_32_bits(pe); + ib->ptr[ib->length_dw++] = upper_32_bits(pe); + + pe += bytes; + src += bytes; + count -= bytes / 8; + } +} + +/** + * sdma_v3_0_vm_write_pte - update PTEs by writing them manually + * + * @ib: indirect buffer to fill with commands + * @pe: addr of the page entry + * @addr: dst addr to write into pe + * @count: number of page entries to update + * @incr: increase next addr by incr bytes + * @flags: access flags + * + * Update PTEs by writing them manually using sDMA (CIK). + */ +static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib, + uint64_t pe, + uint64_t addr, unsigned count, + uint32_t incr, uint32_t flags) +{ + uint64_t value; + unsigned ndw; + + while (count) { + ndw = count * 2; + if (ndw > 0xFFFFE) + ndw = 0xFFFFE; + + /* for non-physically contiguous pages (system) */ + ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | + SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); + ib->ptr[ib->length_dw++] = pe; + ib->ptr[ib->length_dw++] = upper_32_bits(pe); + ib->ptr[ib->length_dw++] = ndw; + for (; ndw > 0; ndw -= 2, --count, pe += 8) { + if (flags & AMDGPU_PTE_SYSTEM) { + value = amdgpu_vm_map_gart(ib->ring->adev, addr); + value &= 0xFFFFFFFFFFFFF000ULL; + } else if (flags & AMDGPU_PTE_VALID) { + value = addr; + } else { + value = 0; + } + addr += incr; + value |= flags; + ib->ptr[ib->length_dw++] = value; + ib->ptr[ib->length_dw++] = upper_32_bits(value); + } + } +} + +/** + * sdma_v3_0_vm_set_pte_pde - update the page tables using sDMA + * + * @ib: indirect buffer to fill with commands + * @pe: addr of the page entry + * @addr: dst addr to write into pe + * @count: number of page entries to update + * @incr: increase next addr by incr bytes + * @flags: access flags + * + * Update the page tables using sDMA (CIK). 
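+ * Each SDMA_OP_GEN_PTEPDE packet covers at most 0x7FFFF entries and
+ * carries the destination address, the flags mask, the starting value,
+ * the per-entry increment and the entry count.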
+ */ +static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib, + uint64_t pe, + uint64_t addr, unsigned count, + uint32_t incr, uint32_t flags) +{ + uint64_t value; + unsigned ndw; + + while (count) { + ndw = count; + if (ndw > 0x7FFFF) + ndw = 0x7FFFF; + + if (flags & AMDGPU_PTE_VALID) + value = addr; + else + value = 0; + + /* for physically contiguous pages (vram) */ + ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE); + ib->ptr[ib->length_dw++] = pe; /* dst addr */ + ib->ptr[ib->length_dw++] = upper_32_bits(pe); + ib->ptr[ib->length_dw++] = flags; /* mask */ + ib->ptr[ib->length_dw++] = 0; + ib->ptr[ib->length_dw++] = value; /* value */ + ib->ptr[ib->length_dw++] = upper_32_bits(value); + ib->ptr[ib->length_dw++] = incr; /* increment size */ + ib->ptr[ib->length_dw++] = 0; + ib->ptr[ib->length_dw++] = ndw; /* number of entries */ + + pe += ndw * 8; + addr += ndw * incr; + count -= ndw; + } +} + +/** + * sdma_v3_0_vm_pad_ib - pad the IB to the required number of dw + * + * @ib: indirect buffer to fill with padding + * + */ +static void sdma_v3_0_vm_pad_ib(struct amdgpu_ib *ib) +{ + while (ib->length_dw & 0x7) + ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP); +} + +/** + * sdma_v3_0_ring_emit_vm_flush - cik vm flush using sDMA + * + * @ring: amdgpu_ring pointer + * @vm: amdgpu_vm pointer + * + * Update the page table base and flush the VM TLB + * using sDMA (VI). + */ +static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring, + unsigned vm_id, uint64_t pd_addr) +{ + u32 srbm_gfx_cntl = 0; + + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | + SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); + if (vm_id < 8) { + amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id)); + } else { + amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8)); + } + amdgpu_ring_write(ring, pd_addr >> 12); + + /* update SH_MEM_* regs */ + srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vm_id); + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | + SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); + amdgpu_ring_write(ring, mmSRBM_GFX_CNTL); + amdgpu_ring_write(ring, srbm_gfx_cntl); + + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | + SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); + amdgpu_ring_write(ring, mmSH_MEM_BASES); + amdgpu_ring_write(ring, 0); + + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | + SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); + amdgpu_ring_write(ring, mmSH_MEM_CONFIG); + amdgpu_ring_write(ring, 0); + + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | + SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); + amdgpu_ring_write(ring, mmSH_MEM_APE1_BASE); + amdgpu_ring_write(ring, 1); + + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | + SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); + amdgpu_ring_write(ring, mmSH_MEM_APE1_LIMIT); + amdgpu_ring_write(ring, 0); + + srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, 0); + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | + SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); + amdgpu_ring_write(ring, mmSRBM_GFX_CNTL); + amdgpu_ring_write(ring, srbm_gfx_cntl); + + + /* flush TLB */ + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | + SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); + amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST); + amdgpu_ring_write(ring, 1 << vm_id); + + /* wait for flush */ + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) | + 
SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) | + SDMA_PKT_POLL_REGMEM_HEADER_FUNC(0)); /* always */ + amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, 0); /* reference */ + amdgpu_ring_write(ring, 0); /* mask */ + amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | + SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */ +} + +static int sdma_v3_0_early_init(struct amdgpu_device *adev) +{ + sdma_v3_0_set_ring_funcs(adev); + sdma_v3_0_set_buffer_funcs(adev); + sdma_v3_0_set_vm_pte_funcs(adev); + sdma_v3_0_set_irq_funcs(adev); + + return 0; +} + +static int sdma_v3_0_sw_init(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring; + int r; + + /* SDMA trap event */ + r = amdgpu_irq_add_id(adev, 224, &adev->sdma_trap_irq); + if (r) + return r; + + /* SDMA Privileged inst */ + r = amdgpu_irq_add_id(adev, 241, &adev->sdma_illegal_inst_irq); + if (r) + return r; + + /* SDMA Privileged inst */ + r = amdgpu_irq_add_id(adev, 247, &adev->sdma_illegal_inst_irq); + if (r) + return r; + + r = sdma_v3_0_init_microcode(adev); + if (r) { + DRM_ERROR("Failed to load sdma firmware!\n"); + return r; + } + + ring = &adev->sdma[0].ring; + ring->ring_obj = NULL; + ring->use_doorbell = true; + ring->doorbell_index = AMDGPU_DOORBELL_sDMA_ENGINE0; + + ring = &adev->sdma[1].ring; + ring->ring_obj = NULL; + ring->use_doorbell = true; + ring->doorbell_index = AMDGPU_DOORBELL_sDMA_ENGINE1; + + ring = &adev->sdma[0].ring; + sprintf(ring->name, "sdma0"); + r = amdgpu_ring_init(adev, ring, 256 * 1024, + SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf, + &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP0, + AMDGPU_RING_TYPE_SDMA); + if (r) + return r; + + ring = &adev->sdma[1].ring; + sprintf(ring->name, "sdma1"); + r = amdgpu_ring_init(adev, ring, 256 * 1024, + SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf, + &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP1, + AMDGPU_RING_TYPE_SDMA); + if (r) + return r; + + return r; +} + +static int sdma_v3_0_sw_fini(struct amdgpu_device *adev) +{ + amdgpu_ring_fini(&adev->sdma[0].ring); + amdgpu_ring_fini(&adev->sdma[1].ring); + + return 0; +} + +static int sdma_v3_0_hw_init(struct amdgpu_device *adev) +{ + int r; + + sdma_v3_0_init_golden_registers(adev); + + r = sdma_v3_0_start(adev); + if (r) + return r; + + return r; +} + +static int sdma_v3_0_hw_fini(struct amdgpu_device *adev) +{ + sdma_v3_0_enable(adev, false); + + return 0; +} + +static int sdma_v3_0_suspend(struct amdgpu_device *adev) +{ + + return sdma_v3_0_hw_fini(adev); +} + +static int sdma_v3_0_resume(struct amdgpu_device *adev) +{ + + return sdma_v3_0_hw_init(adev); +} + +static bool sdma_v3_0_is_idle(struct amdgpu_device *adev) +{ + u32 tmp = RREG32(mmSRBM_STATUS2); + + if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK | + SRBM_STATUS2__SDMA1_BUSY_MASK)) + return false; + + return true; +} + +static int sdma_v3_0_wait_for_idle(struct amdgpu_device *adev) +{ + unsigned i; + u32 tmp; + + for (i = 0; i < adev->usec_timeout; i++) { + tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK | + SRBM_STATUS2__SDMA1_BUSY_MASK); + + if (!tmp) + return 0; + udelay(1); + } + return -ETIMEDOUT; +} + +static void sdma_v3_0_print_status(struct amdgpu_device *adev) +{ + int i, j; + + dev_info(adev->dev, "VI SDMA registers\n"); + dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", + RREG32(mmSRBM_STATUS2)); + for (i = 0; i < SDMA_MAX_INSTANCE; i++) { + dev_info(adev->dev, " SDMA%d_STATUS_REG=0x%08X\n", + i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i])); + dev_info(adev->dev, 
" SDMA%d_F32_CNTL=0x%08X\n", + i, RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i])); + dev_info(adev->dev, " SDMA%d_CNTL=0x%08X\n", + i, RREG32(mmSDMA0_CNTL + sdma_offsets[i])); + dev_info(adev->dev, " SDMA%d_SEM_WAIT_FAIL_TIMER_CNTL=0x%08X\n", + i, RREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i])); + dev_info(adev->dev, " SDMA%d_GFX_IB_CNTL=0x%08X\n", + i, RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i])); + dev_info(adev->dev, " SDMA%d_GFX_RB_CNTL=0x%08X\n", + i, RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i])); + dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR=0x%08X\n", + i, RREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i])); + dev_info(adev->dev, " SDMA%d_GFX_RB_WPTR=0x%08X\n", + i, RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i])); + dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR_ADDR_HI=0x%08X\n", + i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i])); + dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR_ADDR_LO=0x%08X\n", + i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i])); + dev_info(adev->dev, " SDMA%d_GFX_RB_BASE=0x%08X\n", + i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i])); + dev_info(adev->dev, " SDMA%d_GFX_RB_BASE_HI=0x%08X\n", + i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i])); + dev_info(adev->dev, " SDMA%d_GFX_DOORBELL=0x%08X\n", + i, RREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i])); + mutex_lock(&adev->srbm_mutex); + for (j = 0; j < 16; j++) { + vi_srbm_select(adev, 0, 0, 0, j); + dev_info(adev->dev, " VM %d:\n", j); + dev_info(adev->dev, " SDMA%d_GFX_VIRTUAL_ADDR=0x%08X\n", + i, RREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i])); + dev_info(adev->dev, " SDMA%d_GFX_APE1_CNTL=0x%08X\n", + i, RREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i])); + } + vi_srbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + } +} + +static int sdma_v3_0_soft_reset(struct amdgpu_device *adev) +{ + u32 srbm_soft_reset = 0; + u32 tmp = RREG32(mmSRBM_STATUS2); + + if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) { + /* sdma0 */ + tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET); + tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0); + WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp); + srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK; + } + if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) { + /* sdma1 */ + tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET); + tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0); + WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp); + srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK; + } + + if (srbm_soft_reset) { + sdma_v3_0_print_status(adev); + + tmp = RREG32(mmSRBM_SOFT_RESET); + tmp |= srbm_soft_reset; + dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + udelay(50); + + tmp &= ~srbm_soft_reset; + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + /* Wait a little for things to settle down */ + udelay(50); + + sdma_v3_0_print_status(adev); + } + + return 0; +} + +static int sdma_v3_0_set_trap_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned type, + enum amdgpu_interrupt_state state) +{ + u32 sdma_cntl; + + switch (type) { + case AMDGPU_SDMA_IRQ_TRAP0: + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET); + sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0); + WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl); + break; + case AMDGPU_IRQ_STATE_ENABLE: + sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET); + sdma_cntl = REG_SET_FIELD(sdma_cntl, 
SDMA0_CNTL, TRAP_ENABLE, 1); + WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl); + break; + default: + break; + } + break; + case AMDGPU_SDMA_IRQ_TRAP1: + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET); + sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0); + WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl); + break; + case AMDGPU_IRQ_STATE_ENABLE: + sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET); + sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1); + WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl); + break; + default: + break; + } + break; + default: + break; + } + return 0; +} + +static int sdma_v3_0_process_trap_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + u8 instance_id, queue_id; + + instance_id = (entry->ring_id & 0x3) >> 0; + queue_id = (entry->ring_id & 0xc) >> 2; + DRM_DEBUG("IH: SDMA trap\n"); + switch (instance_id) { + case 0: + switch (queue_id) { + case 0: + amdgpu_fence_process(&adev->sdma[0].ring); + break; + case 1: + /* XXX compute */ + break; + case 2: + /* XXX compute */ + break; + } + break; + case 1: + switch (queue_id) { + case 0: + amdgpu_fence_process(&adev->sdma[1].ring); + break; + case 1: + /* XXX compute */ + break; + case 2: + /* XXX compute */ + break; + } + break; + } + return 0; +} + +static int sdma_v3_0_process_illegal_inst_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + DRM_ERROR("Illegal instruction in SDMA command stream\n"); + schedule_work(&adev->reset_work); + return 0; +} + +static int sdma_v3_0_set_clockgating_state(struct amdgpu_device *adev, + enum amdgpu_clockgating_state state) +{ + /* XXX handled via the smc on VI */ + + return 0; +} + +static int sdma_v3_0_set_powergating_state(struct amdgpu_device *adev, + enum amdgpu_powergating_state state) +{ + return 0; +} + +const struct amdgpu_ip_funcs sdma_v3_0_ip_funcs = { + .early_init = sdma_v3_0_early_init, + .late_init = NULL, + .sw_init = sdma_v3_0_sw_init, + .sw_fini = sdma_v3_0_sw_fini, + .hw_init = sdma_v3_0_hw_init, + .hw_fini = sdma_v3_0_hw_fini, + .suspend = sdma_v3_0_suspend, + .resume = sdma_v3_0_resume, + .is_idle = sdma_v3_0_is_idle, + .wait_for_idle = sdma_v3_0_wait_for_idle, + .soft_reset = sdma_v3_0_soft_reset, + .print_status = sdma_v3_0_print_status, + .set_clockgating_state = sdma_v3_0_set_clockgating_state, + .set_powergating_state = sdma_v3_0_set_powergating_state, +}; + +/** + * sdma_v3_0_ring_is_lockup - Check if the DMA engine is locked up + * + * @ring: amdgpu_ring structure holding ring information + * + * Check if the async DMA engine is locked up (VI). + * Returns true if the engine appears to be locked up, false if not. 
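+ * If the engine is idle the lockup tracking is simply refreshed;
+ * otherwise the decision is delegated to amdgpu_ring_test_lockup().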
+ */ +static bool sdma_v3_0_ring_is_lockup(struct amdgpu_ring *ring) +{ + + if (sdma_v3_0_is_idle(ring->adev)) { + amdgpu_ring_lockup_update(ring); + return false; + } + return amdgpu_ring_test_lockup(ring); +} + +static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = { + .get_rptr = sdma_v3_0_ring_get_rptr, + .get_wptr = sdma_v3_0_ring_get_wptr, + .set_wptr = sdma_v3_0_ring_set_wptr, + .parse_cs = NULL, + .emit_ib = sdma_v3_0_ring_emit_ib, + .emit_fence = sdma_v3_0_ring_emit_fence, + .emit_semaphore = sdma_v3_0_ring_emit_semaphore, + .emit_vm_flush = sdma_v3_0_ring_emit_vm_flush, + .test_ring = sdma_v3_0_ring_test_ring, + .test_ib = sdma_v3_0_ring_test_ib, + .is_lockup = sdma_v3_0_ring_is_lockup, +}; + +static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev) +{ + adev->sdma[0].ring.funcs = &sdma_v3_0_ring_funcs; + adev->sdma[1].ring.funcs = &sdma_v3_0_ring_funcs; +} + +static const struct amdgpu_irq_src_funcs sdma_v3_0_trap_irq_funcs = { + .set = sdma_v3_0_set_trap_irq_state, + .process = sdma_v3_0_process_trap_irq, +}; + +static const struct amdgpu_irq_src_funcs sdma_v3_0_illegal_inst_irq_funcs = { + .process = sdma_v3_0_process_illegal_inst_irq, +}; + +static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->sdma_trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST; + adev->sdma_trap_irq.funcs = &sdma_v3_0_trap_irq_funcs; + adev->sdma_illegal_inst_irq.funcs = &sdma_v3_0_illegal_inst_irq_funcs; +} + +/** + * sdma_v3_0_emit_copy_buffer - copy buffer using the sDMA engine + * + * @ring: amdgpu_ring structure holding ring information + * @src_offset: src GPU address + * @dst_offset: dst GPU address + * @byte_count: number of bytes to xfer + * + * Copy GPU buffers using the DMA engine (VI). + * Used by the amdgpu ttm implementation to move pages if + * registered as the asic copy callback. + */ +static void sdma_v3_0_emit_copy_buffer(struct amdgpu_ring *ring, + uint64_t src_offset, + uint64_t dst_offset, + uint32_t byte_count) +{ + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | + SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR)); + amdgpu_ring_write(ring, byte_count); + amdgpu_ring_write(ring, 0); /* src/dst endian swap */ + amdgpu_ring_write(ring, lower_32_bits(src_offset)); + amdgpu_ring_write(ring, upper_32_bits(src_offset)); + amdgpu_ring_write(ring, lower_32_bits(dst_offset)); + amdgpu_ring_write(ring, upper_32_bits(dst_offset)); +} + +/** + * sdma_v3_0_emit_fill_buffer - fill buffer using the sDMA engine + * + * @ring: amdgpu_ring structure holding ring information + * @src_data: value to write to buffer + * @dst_offset: dst GPU address + * @byte_count: number of bytes to xfer + * + * Fill GPU buffers using the DMA engine (VI). 
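+ * A single SDMA_OP_CONST_FILL packet is emitted carrying the destination
+ * address, the 32-bit fill pattern and the byte count.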
+ */ +static void sdma_v3_0_emit_fill_buffer(struct amdgpu_ring *ring, + uint32_t src_data, + uint64_t dst_offset, + uint32_t byte_count) +{ + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL)); + amdgpu_ring_write(ring, lower_32_bits(dst_offset)); + amdgpu_ring_write(ring, upper_32_bits(dst_offset)); + amdgpu_ring_write(ring, src_data); + amdgpu_ring_write(ring, byte_count); +} + +static const struct amdgpu_buffer_funcs sdma_v3_0_buffer_funcs = { + .copy_max_bytes = 0x1fffff, + .copy_num_dw = 7, + .emit_copy_buffer = sdma_v3_0_emit_copy_buffer, + + .fill_max_bytes = 0x1fffff, + .fill_num_dw = 5, + .emit_fill_buffer = sdma_v3_0_emit_fill_buffer, +}; + +static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev) +{ + if (adev->mman.buffer_funcs == NULL) { + adev->mman.buffer_funcs = &sdma_v3_0_buffer_funcs; + adev->mman.buffer_funcs_ring = &adev->sdma[0].ring; + } +} + +static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = { + .copy_pte = sdma_v3_0_vm_copy_pte, + .write_pte = sdma_v3_0_vm_write_pte, + .set_pte_pde = sdma_v3_0_vm_set_pte_pde, + .pad_ib = sdma_v3_0_vm_pad_ib, +}; + +static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev) +{ + if (adev->vm_manager.vm_pte_funcs == NULL) { + adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs; + adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring; + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h new file mode 100644 index 000000000000..85bf2ac59252 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h @@ -0,0 +1,29 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __SDMA_V3_0_H__ +#define __SDMA_V3_0_H__ + +extern const struct amdgpu_ip_funcs sdma_v3_0_ip_funcs; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/smu8.h b/drivers/gpu/drm/amd/amdgpu/smu8.h new file mode 100644 index 000000000000..d758d07b6a31 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/smu8.h @@ -0,0 +1,72 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef SMU8_H +#define SMU8_H + +#pragma pack(push, 1) + +#define ENABLE_DEBUG_FEATURES + +struct SMU8_Firmware_Header { + uint32_t Version; + uint32_t ImageSize; + uint32_t CodeSize; + uint32_t HeaderSize; + uint32_t EntryPoint; + uint32_t Rtos; + uint32_t UcodeLoadStatus; + uint32_t DpmTable; + uint32_t FanTable; + uint32_t PmFuseTable; + uint32_t Globals; + uint32_t Reserved[20]; + uint32_t Signature; +}; + +struct SMU8_MultimediaPowerLogData { + uint32_t avgTotalPower; + uint32_t avgGpuPower; + uint32_t avgUvdPower; + uint32_t avgVcePower; + + uint32_t avgSclk; + uint32_t avgDclk; + uint32_t avgVclk; + uint32_t avgEclk; + + uint32_t startTimeHi; + uint32_t startTimeLo; + + uint32_t endTimeHi; + uint32_t endTimeLo; +}; + +#define SMU8_FIRMWARE_HEADER_LOCATION 0x1FF80 +#define SMU8_UNBCSR_START_ADDR 0xC0100000 + +#define SMN_MP1_SRAM_START_ADDR 0x10000000 + +#pragma pack(pop) + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/smu8_fusion.h b/drivers/gpu/drm/amd/amdgpu/smu8_fusion.h new file mode 100644 index 000000000000..5c9cc3c0bbfa --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/smu8_fusion.h @@ -0,0 +1,127 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef SMU8_FUSION_H +#define SMU8_FUSION_H + +#include "smu8.h" + +#pragma pack(push, 1) + +#define SMU8_MAX_CUS 2 +#define SMU8_PSMS_PER_CU 4 +#define SMU8_CACS_PER_CU 4 + +struct SMU8_GfxCuPgScoreboard { + uint8_t Enabled; + uint8_t spare[3]; +}; + +struct SMU8_Port80MonitorTable { + uint32_t MmioAddress; + uint32_t MemoryBaseHi; + uint32_t MemoryBaseLo; + uint16_t MemoryBufferSize; + uint16_t MemoryPosition; + uint16_t PollingInterval; + uint8_t EnableCsrShadow; + uint8_t EnableDramShadow; +}; + +/* Clock Table Definitions */ +#define NUM_SCLK_LEVELS 8 +#define NUM_LCLK_LEVELS 8 +#define NUM_UVD_LEVELS 8 +#define NUM_ECLK_LEVELS 8 +#define NUM_ACLK_LEVELS 8 + +struct SMU8_Fusion_ClkLevel { + uint8_t GnbVid; + uint8_t GfxVid; + uint8_t DfsDid; + uint8_t DeepSleepDid; + uint32_t DfsBypass; + uint32_t Frequency; +}; + +struct SMU8_Fusion_SclkBreakdownTable { + struct SMU8_Fusion_ClkLevel ClkLevel[NUM_SCLK_LEVELS]; + struct SMU8_Fusion_ClkLevel DpmOffLevel; + /* SMU8_Fusion_ClkLevel PwrOffLevel; */ + uint32_t SclkValidMask; + uint32_t MaxSclkIndex; +}; + +struct SMU8_Fusion_LclkBreakdownTable { + struct SMU8_Fusion_ClkLevel ClkLevel[NUM_LCLK_LEVELS]; + struct SMU8_Fusion_ClkLevel DpmOffLevel; + /* SMU8_Fusion_ClkLevel PwrOffLevel; */ + uint32_t LclkValidMask; + uint32_t MaxLclkIndex; +}; + +struct SMU8_Fusion_EclkBreakdownTable { + struct SMU8_Fusion_ClkLevel ClkLevel[NUM_ECLK_LEVELS]; + struct SMU8_Fusion_ClkLevel DpmOffLevel; + struct SMU8_Fusion_ClkLevel PwrOffLevel; + uint32_t EclkValidMask; + uint32_t MaxEclkIndex; +}; + +struct SMU8_Fusion_VclkBreakdownTable { + struct SMU8_Fusion_ClkLevel ClkLevel[NUM_UVD_LEVELS]; + struct SMU8_Fusion_ClkLevel DpmOffLevel; + struct SMU8_Fusion_ClkLevel PwrOffLevel; + uint32_t VclkValidMask; + uint32_t MaxVclkIndex; +}; + +struct SMU8_Fusion_DclkBreakdownTable { + struct SMU8_Fusion_ClkLevel ClkLevel[NUM_UVD_LEVELS]; + struct SMU8_Fusion_ClkLevel DpmOffLevel; + struct SMU8_Fusion_ClkLevel PwrOffLevel; + uint32_t DclkValidMask; + uint32_t MaxDclkIndex; +}; + +struct SMU8_Fusion_AclkBreakdownTable { + struct SMU8_Fusion_ClkLevel ClkLevel[NUM_ACLK_LEVELS]; + struct SMU8_Fusion_ClkLevel DpmOffLevel; + struct SMU8_Fusion_ClkLevel PwrOffLevel; + uint32_t AclkValidMask; + uint32_t MaxAclkIndex; +}; + + +struct SMU8_Fusion_ClkTable { + struct SMU8_Fusion_SclkBreakdownTable SclkBreakdownTable; + struct SMU8_Fusion_LclkBreakdownTable LclkBreakdownTable; + struct SMU8_Fusion_EclkBreakdownTable EclkBreakdownTable; + struct SMU8_Fusion_VclkBreakdownTable VclkBreakdownTable; + struct SMU8_Fusion_DclkBreakdownTable DclkBreakdownTable; + struct SMU8_Fusion_AclkBreakdownTable AclkBreakdownTable; +}; + +#pragma pack(pop) + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/smu_ucode_xfer_cz.h b/drivers/gpu/drm/amd/amdgpu/smu_ucode_xfer_cz.h new file mode 100644 index 000000000000..f8ba071f39c8 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/smu_ucode_xfer_cz.h @@ -0,0 +1,147 @@ +// CZ Ucode Loading Definitions +#ifndef SMU_UCODE_XFER_CZ_H +#define SMU_UCODE_XFER_CZ_H + +#define NUM_JOBLIST_ENTRIES 32 + +#define TASK_TYPE_NO_ACTION 0 +#define TASK_TYPE_UCODE_LOAD 1 +#define TASK_TYPE_UCODE_SAVE 2 +#define TASK_TYPE_REG_LOAD 3 +#define TASK_TYPE_REG_SAVE 4 +#define TASK_TYPE_INITIALIZE 5 + +#define TASK_ARG_REG_SMCIND 0 +#define TASK_ARG_REG_MMIO 1 +#define TASK_ARG_REG_FCH 2 +#define TASK_ARG_REG_UNB 3 + +#define TASK_ARG_INIT_MM_PWR_LOG 0 +#define TASK_ARG_INIT_CLK_TABLE 1 + +#define JOB_GFX_SAVE 0 +#define JOB_GFX_RESTORE 1 +#define JOB_FCH_SAVE 2 +#define 
JOB_FCH_RESTORE 3 +#define JOB_UNB_SAVE 4 +#define JOB_UNB_RESTORE 5 +#define JOB_GMC_SAVE 6 +#define JOB_GMC_RESTORE 7 +#define JOB_GNB_SAVE 8 +#define JOB_GNB_RESTORE 9 + +#define IGNORE_JOB 0xff +#define END_OF_TASK_LIST (uint16_t)0xffff + +// Size of DRAM regions (in bytes) requested by SMU: +#define SMU_DRAM_REQ_MM_PWR_LOG 48 + +#define UCODE_ID_SDMA0 0 +#define UCODE_ID_SDMA1 1 +#define UCODE_ID_CP_CE 2 +#define UCODE_ID_CP_PFP 3 +#define UCODE_ID_CP_ME 4 +#define UCODE_ID_CP_MEC_JT1 5 +#define UCODE_ID_CP_MEC_JT2 6 +#define UCODE_ID_GMCON_RENG 7 +#define UCODE_ID_RLC_G 8 +#define UCODE_ID_RLC_SCRATCH 9 +#define UCODE_ID_RLC_SRM_ARAM 10 +#define UCODE_ID_RLC_SRM_DRAM 11 +#define UCODE_ID_DMCU_ERAM 12 +#define UCODE_ID_DMCU_IRAM 13 + +#define UCODE_ID_SDMA0_MASK 0x00000001 +#define UCODE_ID_SDMA1_MASK 0x00000002 +#define UCODE_ID_CP_CE_MASK 0x00000004 +#define UCODE_ID_CP_PFP_MASK 0x00000008 +#define UCODE_ID_CP_ME_MASK 0x00000010 +#define UCODE_ID_CP_MEC_JT1_MASK 0x00000020 +#define UCODE_ID_CP_MEC_JT2_MASK 0x00000040 +#define UCODE_ID_GMCON_RENG_MASK 0x00000080 +#define UCODE_ID_RLC_G_MASK 0x00000100 +#define UCODE_ID_RLC_SCRATCH_MASK 0x00000200 +#define UCODE_ID_RLC_SRM_ARAM_MASK 0x00000400 +#define UCODE_ID_RLC_SRM_DRAM_MASK 0x00000800 +#define UCODE_ID_DMCU_ERAM_MASK 0x00001000 +#define UCODE_ID_DMCU_IRAM_MASK 0x00002000 + +#define UCODE_ID_SDMA0_SIZE_BYTE 10368 +#define UCODE_ID_SDMA1_SIZE_BYTE 10368 +#define UCODE_ID_CP_CE_SIZE_BYTE 8576 +#define UCODE_ID_CP_PFP_SIZE_BYTE 16768 +#define UCODE_ID_CP_ME_SIZE_BYTE 16768 +#define UCODE_ID_CP_MEC_JT1_SIZE_BYTE 384 +#define UCODE_ID_CP_MEC_JT2_SIZE_BYTE 384 +#define UCODE_ID_GMCON_RENG_SIZE_BYTE 4096 +#define UCODE_ID_RLC_G_SIZE_BYTE 2048 +#define UCODE_ID_RLC_SCRATCH_SIZE_BYTE 132 +#define UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE 8192 +#define UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE 4096 +#define UCODE_ID_DMCU_ERAM_SIZE_BYTE 24576 +#define UCODE_ID_DMCU_IRAM_SIZE_BYTE 1024 + +#define NUM_UCODES 14 + +typedef struct { + uint32_t high; + uint32_t low; +} data_64_t; + +struct SMU_Task { + uint8_t type; + uint8_t arg; + uint16_t next; + data_64_t addr; + uint32_t size_bytes; +}; +typedef struct SMU_Task SMU_Task; + +struct TOC { + uint8_t JobList[NUM_JOBLIST_ENTRIES]; + SMU_Task tasks[1]; +}; + +// META DATA COMMAND Definitions +#define METADATA_CMD_MODE0 0x00000103 +#define METADATA_CMD_MODE1 0x00000113 +#define METADATA_CMD_MODE2 0x00000123 +#define METADATA_CMD_MODE3 0x00000133 +#define METADATA_CMD_DELAY 0x00000203 +#define METADATA_CMD_CHNG_REGSPACE 0x00000303 +#define METADATA_PERFORM_ON_SAVE 0x00001000 +#define METADATA_PERFORM_ON_LOAD 0x00002000 +#define METADATA_CMD_ARG_MASK 0xFFFF0000 +#define METADATA_CMD_ARG_SHIFT 16 + +// Simple register addr/data fields +struct SMU_MetaData_Mode0 { + uint32_t register_address; + uint32_t register_data; +}; +typedef struct SMU_MetaData_Mode0 SMU_MetaData_Mode0; + +// Register addr/data with mask +struct SMU_MetaData_Mode1 { + uint32_t register_address; + uint32_t register_mask; + uint32_t register_data; +}; +typedef struct SMU_MetaData_Mode1 SMU_MetaData_Mode1; + +struct SMU_MetaData_Mode2 { + uint32_t register_address; + uint32_t register_mask; + uint32_t target_value; +}; +typedef struct SMU_MetaData_Mode2 SMU_MetaData_Mode2; + +// Always write data (even on a save operation) +struct SMU_MetaData_Mode3 { + uint32_t register_address; + uint32_t register_mask; + uint32_t register_data; +}; +typedef struct SMU_MetaData_Mode3 SMU_MetaData_Mode3; + +#endif diff --git 
a/drivers/gpu/drm/amd/amdgpu/smu_ucode_xfer_vi.h b/drivers/gpu/drm/amd/amdgpu/smu_ucode_xfer_vi.h new file mode 100644 index 000000000000..c24a81eebc7c --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/smu_ucode_xfer_vi.h @@ -0,0 +1,100 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef SMU_UCODE_XFER_VI_H +#define SMU_UCODE_XFER_VI_H + +#define SMU_DRAMData_TOC_VERSION 1 +#define MAX_IH_REGISTER_COUNT 65535 +#define SMU_DIGEST_SIZE_BYTES 20 +#define SMU_FB_SIZE_BYTES 1048576 +#define SMU_MAX_ENTRIES 12 + +#define UCODE_ID_SMU 0 +#define UCODE_ID_SDMA0 1 +#define UCODE_ID_SDMA1 2 +#define UCODE_ID_CP_CE 3 +#define UCODE_ID_CP_PFP 4 +#define UCODE_ID_CP_ME 5 +#define UCODE_ID_CP_MEC 6 +#define UCODE_ID_CP_MEC_JT1 7 +#define UCODE_ID_CP_MEC_JT2 8 +#define UCODE_ID_GMCON_RENG 9 +#define UCODE_ID_RLC_G 10 +#define UCODE_ID_IH_REG_RESTORE 11 +#define UCODE_ID_VBIOS 12 +#define UCODE_ID_MISC_METADATA 13 +#define UCODE_ID_RLC_SCRATCH 32 +#define UCODE_ID_RLC_SRM_ARAM 33 +#define UCODE_ID_RLC_SRM_DRAM 34 +#define UCODE_ID_MEC_STORAGE 35 +#define UCODE_ID_VBIOS_PARAMETERS 36 +#define UCODE_META_DATA 0xFF + +#define UCODE_ID_SMU_MASK 0x00000001 +#define UCODE_ID_SDMA0_MASK 0x00000002 +#define UCODE_ID_SDMA1_MASK 0x00000004 +#define UCODE_ID_CP_CE_MASK 0x00000008 +#define UCODE_ID_CP_PFP_MASK 0x00000010 +#define UCODE_ID_CP_ME_MASK 0x00000020 +#define UCODE_ID_CP_MEC_MASK 0x00000040 +#define UCODE_ID_CP_MEC_JT1_MASK 0x00000080 +#define UCODE_ID_CP_MEC_JT2_MASK 0x00000100 +#define UCODE_ID_GMCON_RENG_MASK 0x00000200 +#define UCODE_ID_RLC_G_MASK 0x00000400 +#define UCODE_ID_IH_REG_RESTORE_MASK 0x00000800 +#define UCODE_ID_VBIOS_MASK 0x00001000 + +#define UCODE_FLAG_UNHALT_MASK 0x1 + +struct SMU_Entry { +#ifndef __BIG_ENDIAN + uint16_t id; + uint16_t version; + uint32_t image_addr_high; + uint32_t image_addr_low; + uint32_t meta_data_addr_high; + uint32_t meta_data_addr_low; + uint32_t data_size_byte; + uint16_t flags; + uint16_t num_register_entries; +#else + uint16_t version; + uint16_t id; + uint32_t image_addr_high; + uint32_t image_addr_low; + uint32_t meta_data_addr_high; + uint32_t meta_data_addr_low; + uint32_t data_size_byte; + uint16_t num_register_entries; + uint16_t flags; +#endif +}; + +struct SMU_DRAMData_TOC { + uint32_t structure_version; + uint32_t num_entries; + struct SMU_Entry entry[SMU_MAX_ENTRIES]; +}; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c 
b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c new file mode 100644 index 000000000000..98bd707ac5dc --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c @@ -0,0 +1,172 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include +#include "drmP.h" +#include "amdgpu.h" +#include "tonga_smumgr.h" + +MODULE_FIRMWARE("radeon/tonga_smc.bin"); + +static void tonga_dpm_set_funcs(struct amdgpu_device *adev); + +static int tonga_dpm_early_init(struct amdgpu_device *adev) +{ + tonga_dpm_set_funcs(adev); + + return 0; +} + +static int tonga_dpm_init_microcode(struct amdgpu_device *adev) +{ + char fw_name[30] = "radeon/tonga_smc.bin"; + int err; + + err = request_firmware(&adev->pm.fw, fw_name, adev->dev); + if (err) + goto out; + err = amdgpu_ucode_validate(adev->pm.fw); + +out: + if (err) { + DRM_ERROR("Failed to load firmware \"%s\"", fw_name); + release_firmware(adev->pm.fw); + adev->pm.fw = NULL; + } + return err; +} + +static int tonga_dpm_sw_init(struct amdgpu_device *adev) +{ + int ret; + + ret = tonga_dpm_init_microcode(adev); + if (ret) + return ret; + + return 0; +} + +static int tonga_dpm_sw_fini(struct amdgpu_device *adev) +{ + return 0; +} + +static int tonga_dpm_hw_init(struct amdgpu_device *adev) +{ + int ret; + + mutex_lock(&adev->pm.mutex); + + ret = tonga_smu_init(adev); + if (ret) { + DRM_ERROR("SMU initialization failed\n"); + goto fail; + } + + ret = tonga_smu_start(adev); + if (ret) { + DRM_ERROR("SMU start failed\n"); + goto fail; + } + + mutex_unlock(&adev->pm.mutex); + return 0; + +fail: + adev->firmware.smu_load = false; + mutex_unlock(&adev->pm.mutex); + return -EINVAL; +} + +static int tonga_dpm_hw_fini(struct amdgpu_device *adev) +{ + mutex_lock(&adev->pm.mutex); + tonga_smu_fini(adev); + mutex_unlock(&adev->pm.mutex); + return 0; +} + +static int tonga_dpm_suspend(struct amdgpu_device *adev) +{ + tonga_dpm_hw_fini(adev); + + return 0; +} + +static int tonga_dpm_resume(struct amdgpu_device *adev) +{ + tonga_dpm_hw_init(adev); + + return 0; +} + +static int tonga_dpm_set_clockgating_state(struct amdgpu_device *adev, + enum amdgpu_clockgating_state state) +{ + return 0; +} + +static int tonga_dpm_set_powergating_state(struct amdgpu_device *adev, + enum amdgpu_powergating_state state) +{ + return 0; +} + +const struct amdgpu_ip_funcs tonga_dpm_ip_funcs = { + .early_init = tonga_dpm_early_init, + .late_init = NULL, + .sw_init = tonga_dpm_sw_init, + .sw_fini = tonga_dpm_sw_fini, + 
.hw_init = tonga_dpm_hw_init, + .hw_fini = tonga_dpm_hw_fini, + .suspend = tonga_dpm_suspend, + .resume = tonga_dpm_resume, + .is_idle = NULL, + .wait_for_idle = NULL, + .soft_reset = NULL, + .print_status = NULL, + .set_clockgating_state = tonga_dpm_set_clockgating_state, + .set_powergating_state = tonga_dpm_set_powergating_state, +}; + +static const struct amdgpu_dpm_funcs tonga_dpm_funcs = { + .get_temperature = NULL, + .pre_set_power_state = NULL, + .set_power_state = NULL, + .post_set_power_state = NULL, + .display_configuration_changed = NULL, + .get_sclk = NULL, + .get_mclk = NULL, + .print_power_state = NULL, + .debugfs_print_current_performance_level = NULL, + .force_performance_level = NULL, + .vblank_too_short = NULL, + .powergate_uvd = NULL, +}; + +static void tonga_dpm_set_funcs(struct amdgpu_device *adev) +{ + if (NULL == adev->pm.funcs) + adev->pm.funcs = &tonga_dpm_funcs; +} diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c new file mode 100644 index 000000000000..cff1b8bce6a4 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c @@ -0,0 +1,458 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "drmP.h" +#include "amdgpu.h" +#include "amdgpu_ih.h" +#include "vid.h" + +#include "oss/oss_3_0_d.h" +#include "oss/oss_3_0_sh_mask.h" + +#include "bif/bif_5_1_d.h" +#include "bif/bif_5_1_sh_mask.h" + +/* + * Interrupts + * Starting with r6xx, interrupts are handled via a ring buffer. + * Ring buffers are areas of GPU accessible memory that the GPU + * writes interrupt vectors into and the host reads vectors out of. + * There is a rptr (read pointer) that determines where the + * host is currently reading, and a wptr (write pointer) + * which determines where the GPU has written. When the + * pointers are equal, the ring is idle. When the GPU + * writes vectors to the ring buffer, it increments the + * wptr. When there is an interrupt, the host then starts + * fetching commands and processing them until the pointers are + * equal again at which point it updates the rptr. + */ + +static void tonga_ih_set_interrupt_funcs(struct amdgpu_device *adev); + +/** + * tonga_ih_enable_interrupts - Enable the interrupt ring buffer + * + * @adev: amdgpu_device pointer + * + * Enable the interrupt ring buffer (VI). 
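+ * Sets the RB_ENABLE and ENABLE_INTR fields in IH_RB_CNTL.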
+ */
+static void tonga_ih_enable_interrupts(struct amdgpu_device *adev)
+{
+	u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL);
+
+	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
+	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1);
+	WREG32(mmIH_RB_CNTL, ih_rb_cntl);
+	adev->irq.ih.enabled = true;
+}
+
+/**
+ * tonga_ih_disable_interrupts - Disable the interrupt ring buffer
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Disable the interrupt ring buffer (VI).
+ */
+static void tonga_ih_disable_interrupts(struct amdgpu_device *adev)
+{
+	u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL);
+
+	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
+	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0);
+	WREG32(mmIH_RB_CNTL, ih_rb_cntl);
+	/* set rptr, wptr to 0 */
+	WREG32(mmIH_RB_RPTR, 0);
+	WREG32(mmIH_RB_WPTR, 0);
+	adev->irq.ih.enabled = false;
+	adev->irq.ih.rptr = 0;
+}
+
+/**
+ * tonga_ih_irq_init - init and enable the interrupt ring
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Disable interrupts, program the IH ring buffer (base, size,
+ * write-back address and doorbell), then enable it (VI).
+ * Called at device load and resume.
+ * Returns 0 for success, errors for failure.
+ */
+static int tonga_ih_irq_init(struct amdgpu_device *adev)
+{
+	int ret = 0;
+	int rb_bufsz;
+	u32 interrupt_cntl, ih_rb_cntl, ih_doorbell_rtpr;
+	u64 wptr_off;
+
+	/* disable irqs */
+	tonga_ih_disable_interrupts(adev);
+
+	/* setup interrupt control */
+	WREG32(mmINTERRUPT_CNTL2, adev->dummy_page.addr >> 8);
+	interrupt_cntl = RREG32(mmINTERRUPT_CNTL);
+	/* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
+	 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
+	 */
+	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_DUMMY_RD_OVERRIDE, 0);
+	/* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
+	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_REQ_NONSNOOP_EN, 0);
+	WREG32(mmINTERRUPT_CNTL, interrupt_cntl);
+
+	/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer */
+	if (adev->irq.ih.use_bus_addr)
+		WREG32(mmIH_RB_BASE, adev->irq.ih.rb_dma_addr >> 8);
+	else
+		WREG32(mmIH_RB_BASE, adev->irq.ih.gpu_addr >> 8);
+
+	rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);
+	ih_rb_cntl = REG_SET_FIELD(0, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz);
+	/* Ring Buffer write pointer writeback.
+	 * If enabled, IH_RB_WPTR register value is written to memory */
+	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_WRITEBACK_ENABLE, 1);
+	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_VMID, 0);
+
+	if (adev->irq.msi_enabled)
+		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM, 1);
+
+	WREG32(mmIH_RB_CNTL, ih_rb_cntl);
+
+	/* set the writeback address whether it's enabled or not */
+	if (adev->irq.ih.use_bus_addr)
+		wptr_off = adev->irq.ih.rb_dma_addr + (adev->irq.ih.wptr_offs * 4);
+	else
+		wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
+	WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
+	WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF);
+
+	/* set rptr, wptr to 0 */
+	WREG32(mmIH_RB_RPTR, 0);
+	WREG32(mmIH_RB_WPTR, 0);
+
+	ih_doorbell_rtpr = RREG32(mmIH_DOORBELL_RPTR);
+	if (adev->irq.ih.use_doorbell) {
+		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, IH_DOORBELL_RPTR,
+						 OFFSET, adev->irq.ih.doorbell_index);
+		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, IH_DOORBELL_RPTR,
+						 ENABLE, 1);
+	} else {
+		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, IH_DOORBELL_RPTR,
+						 ENABLE, 0);
+	}
+	WREG32(mmIH_DOORBELL_RPTR, ih_doorbell_rtpr);
+
+	pci_set_master(adev->pdev);
+
+	/* enable interrupts */
+	tonga_ih_enable_interrupts(adev);
+
+	return ret;
+}
+
+/**
+ * tonga_ih_irq_disable - disable interrupts
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Disable interrupts on the hw (VI).
+ */
+static void tonga_ih_irq_disable(struct amdgpu_device *adev)
+{
+	tonga_ih_disable_interrupts(adev);
+
+	/* Wait and acknowledge irq */
+	mdelay(1);
+}
+
+/**
+ * tonga_ih_get_wptr - get the IH ring buffer wptr
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Get the IH ring buffer wptr from either the register
+ * or the writeback memory buffer (VI). Also check for
+ * ring buffer overflow and deal with it.
+ * Used by cz_irq_process(VI).
+ * Returns the value of the wptr.
+ */
+static u32 tonga_ih_get_wptr(struct amdgpu_device *adev)
+{
+	u32 wptr, tmp;
+
+	if (adev->irq.ih.use_bus_addr)
+		wptr = le32_to_cpu(adev->irq.ih.ring[adev->irq.ih.wptr_offs]);
+	else
+		wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]);
+
+	if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) {
+		wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+		/* When a ring buffer overflow happens, start parsing interrupts
+		 * from the last not-overwritten vector (wptr + 16). Hopefully
+		 * this should allow us to catch up.
+		 */
+		dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+			 wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask);
+		adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask;
+		tmp = RREG32(mmIH_RB_CNTL);
+		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+		WREG32(mmIH_RB_CNTL, tmp);
+	}
+	return (wptr & adev->irq.ih.ptr_mask);
+}
+
+/**
+ * tonga_ih_decode_iv - decode an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Decodes the interrupt vector at the current rptr
+ * position and also advances the position.
+ */
+static void tonga_ih_decode_iv(struct amdgpu_device *adev,
+			       struct amdgpu_iv_entry *entry)
+{
+	/* wptr/rptr are in bytes!
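+	 * Each IV ring entry is 16 bytes (four dwords), so rptr >> 2 below is
+	 * the dword index of the current entry and rptr is advanced by 16
+	 * once the entry has been decoded.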
*/ + u32 ring_index = adev->irq.ih.rptr >> 2; + uint32_t dw[4]; + + dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]); + dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]); + dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]); + dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]); + + entry->src_id = dw[0] & 0xff; + entry->src_data = dw[1] & 0xfffffff; + entry->ring_id = dw[2] & 0xff; + entry->vm_id = (dw[2] >> 8) & 0xff; + entry->pas_id = (dw[2] >> 16) & 0xffff; + + /* wptr/rptr are in bytes! */ + adev->irq.ih.rptr += 16; +} + +/** + * tonga_ih_set_rptr - set the IH ring buffer rptr + * + * @adev: amdgpu_device pointer + * + * Set the IH ring buffer rptr. + */ +static void tonga_ih_set_rptr(struct amdgpu_device *adev) +{ + if (adev->irq.ih.use_doorbell) { + /* XXX check if swapping is necessary on BE */ + if (adev->irq.ih.use_bus_addr) + adev->irq.ih.ring[adev->irq.ih.rptr_offs] = adev->irq.ih.rptr; + else + adev->wb.wb[adev->irq.ih.rptr_offs] = adev->irq.ih.rptr; + WDOORBELL32(adev->irq.ih.doorbell_index, adev->irq.ih.rptr); + } else { + WREG32(mmIH_RB_RPTR, adev->irq.ih.rptr); + } +} + +static int tonga_ih_early_init(struct amdgpu_device *adev) +{ + tonga_ih_set_interrupt_funcs(adev); + return 0; +} + +static int tonga_ih_sw_init(struct amdgpu_device *adev) +{ + int r; + + r = amdgpu_ih_ring_init(adev, 4 * 1024, true); + if (r) + return r; + + adev->irq.ih.use_doorbell = true; + adev->irq.ih.doorbell_index = AMDGPU_DOORBELL_IH; + + r = amdgpu_irq_init(adev); + + return r; +} + +static int tonga_ih_sw_fini(struct amdgpu_device *adev) +{ + amdgpu_irq_fini(adev); + amdgpu_ih_ring_fini(adev); + + return 0; +} + +static int tonga_ih_hw_init(struct amdgpu_device *adev) +{ + int r; + + r = tonga_ih_irq_init(adev); + if (r) + return r; + + return 0; +} + +static int tonga_ih_hw_fini(struct amdgpu_device *adev) +{ + tonga_ih_irq_disable(adev); + + return 0; +} + +static int tonga_ih_suspend(struct amdgpu_device *adev) +{ + return tonga_ih_hw_fini(adev); +} + +static int tonga_ih_resume(struct amdgpu_device *adev) +{ + return tonga_ih_hw_init(adev); +} + +static bool tonga_ih_is_idle(struct amdgpu_device *adev) +{ + u32 tmp = RREG32(mmSRBM_STATUS); + + if (REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY)) + return false; + + return true; +} + +static int tonga_ih_wait_for_idle(struct amdgpu_device *adev) +{ + unsigned i; + u32 tmp; + + for (i = 0; i < adev->usec_timeout; i++) { + /* read MC_STATUS */ + tmp = RREG32(mmSRBM_STATUS); + if (!REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY)) + return 0; + udelay(1); + } + return -ETIMEDOUT; +} + +static void tonga_ih_print_status(struct amdgpu_device *adev) +{ + dev_info(adev->dev, "TONGA IH registers\n"); + dev_info(adev->dev, " SRBM_STATUS=0x%08X\n", + RREG32(mmSRBM_STATUS)); + dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", + RREG32(mmSRBM_STATUS2)); + dev_info(adev->dev, " INTERRUPT_CNTL=0x%08X\n", + RREG32(mmINTERRUPT_CNTL)); + dev_info(adev->dev, " INTERRUPT_CNTL2=0x%08X\n", + RREG32(mmINTERRUPT_CNTL2)); + dev_info(adev->dev, " IH_CNTL=0x%08X\n", + RREG32(mmIH_CNTL)); + dev_info(adev->dev, " IH_RB_CNTL=0x%08X\n", + RREG32(mmIH_RB_CNTL)); + dev_info(adev->dev, " IH_RB_BASE=0x%08X\n", + RREG32(mmIH_RB_BASE)); + dev_info(adev->dev, " IH_RB_WPTR_ADDR_LO=0x%08X\n", + RREG32(mmIH_RB_WPTR_ADDR_LO)); + dev_info(adev->dev, " IH_RB_WPTR_ADDR_HI=0x%08X\n", + RREG32(mmIH_RB_WPTR_ADDR_HI)); + dev_info(adev->dev, " IH_RB_RPTR=0x%08X\n", + RREG32(mmIH_RB_RPTR)); + dev_info(adev->dev, " IH_RB_WPTR=0x%08X\n", + RREG32(mmIH_RB_WPTR)); +} + +static int 
tonga_ih_soft_reset(struct amdgpu_device *adev) +{ + u32 srbm_soft_reset = 0; + u32 tmp = RREG32(mmSRBM_STATUS); + + if (tmp & SRBM_STATUS__IH_BUSY_MASK) + srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, + SOFT_RESET_IH, 1); + + if (srbm_soft_reset) { + tonga_ih_print_status(adev); + + tmp = RREG32(mmSRBM_SOFT_RESET); + tmp |= srbm_soft_reset; + dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + udelay(50); + + tmp &= ~srbm_soft_reset; + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + /* Wait a little for things to settle down */ + udelay(50); + + tonga_ih_print_status(adev); + } + + return 0; +} + +static int tonga_ih_set_clockgating_state(struct amdgpu_device *adev, + enum amdgpu_clockgating_state state) +{ + // TODO + return 0; +} + +static int tonga_ih_set_powergating_state(struct amdgpu_device *adev, + enum amdgpu_powergating_state state) +{ + // TODO + return 0; +} + +const struct amdgpu_ip_funcs tonga_ih_ip_funcs = { + .early_init = tonga_ih_early_init, + .late_init = NULL, + .sw_init = tonga_ih_sw_init, + .sw_fini = tonga_ih_sw_fini, + .hw_init = tonga_ih_hw_init, + .hw_fini = tonga_ih_hw_fini, + .suspend = tonga_ih_suspend, + .resume = tonga_ih_resume, + .is_idle = tonga_ih_is_idle, + .wait_for_idle = tonga_ih_wait_for_idle, + .soft_reset = tonga_ih_soft_reset, + .print_status = tonga_ih_print_status, + .set_clockgating_state = tonga_ih_set_clockgating_state, + .set_powergating_state = tonga_ih_set_powergating_state, +}; + +static const struct amdgpu_ih_funcs tonga_ih_funcs = { + .get_wptr = tonga_ih_get_wptr, + .decode_iv = tonga_ih_decode_iv, + .set_rptr = tonga_ih_set_rptr +}; + +static void tonga_ih_set_interrupt_funcs(struct amdgpu_device *adev) +{ + if (adev->irq.ih_funcs == NULL) + adev->irq.ih_funcs = &tonga_ih_funcs; +} + diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.h b/drivers/gpu/drm/amd/amdgpu/tonga_ih.h new file mode 100644 index 000000000000..7c9bae87124e --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.h @@ -0,0 +1,29 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __TONGA_IH_H__ +#define __TONGA_IH_H__ + +extern const struct amdgpu_ip_funcs tonga_ih_ip_funcs; + +#endif /* __CZ_IH_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ppsmc.h b/drivers/gpu/drm/amd/amdgpu/tonga_ppsmc.h new file mode 100644 index 000000000000..811781f69482 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/tonga_ppsmc.h @@ -0,0 +1,198 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef TONGA_PP_SMC_H +#define TONGA_PP_SMC_H + +#pragma pack(push, 1) + +#define PPSMC_SWSTATE_FLAG_DC 0x01 +#define PPSMC_SWSTATE_FLAG_UVD 0x02 +#define PPSMC_SWSTATE_FLAG_VCE 0x04 +#define PPSMC_SWSTATE_FLAG_PCIE_X1 0x08 + +#define PPSMC_THERMAL_PROTECT_TYPE_INTERNAL 0x00 +#define PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL 0x01 +#define PPSMC_THERMAL_PROTECT_TYPE_NONE 0xff + +#define PPSMC_SYSTEMFLAG_GPIO_DC 0x01 +#define PPSMC_SYSTEMFLAG_STEPVDDC 0x02 +#define PPSMC_SYSTEMFLAG_GDDR5 0x04 + +#define PPSMC_SYSTEMFLAG_DISABLE_BABYSTEP 0x08 + +#define PPSMC_SYSTEMFLAG_REGULATOR_HOT 0x10 +#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_ANALOG 0x20 +#define PPSMC_SYSTEMFLAG_12CHANNEL 0x40 + +#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_MASK 0x07 +#define PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK 0x08 + +#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTODPMLOWSTATE 0x00 +#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE 0x01 + +#define PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH 0x10 +#define PPSMC_EXTRAFLAGS_DRIVER_TO_GPIO17 0x20 +#define PPSMC_EXTRAFLAGS_PCC_TO_GPIO17 0x40 + +#define PPSMC_DPM2FLAGS_TDPCLMP 0x01 +#define PPSMC_DPM2FLAGS_PWRSHFT 0x02 +#define PPSMC_DPM2FLAGS_OCP 0x04 + +#define PPSMC_DISPLAY_WATERMARK_LOW 0 +#define PPSMC_DISPLAY_WATERMARK_HIGH 1 + +#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP 0x01 +#define PPSMC_STATEFLAG_POWERBOOST 0x02 +#define PPSMC_STATEFLAG_PSKIP_ON_TDP_FAULT 0x04 +#define PPSMC_STATEFLAG_POWERSHIFT 0x08 +#define PPSMC_STATEFLAG_SLOW_READ_MARGIN 0x10 +#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20 +#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS 0x40 + +#define FDO_MODE_HARDWARE 0 +#define FDO_MODE_PIECE_WISE_LINEAR 1 + +enum FAN_CONTROL { + FAN_CONTROL_FUZZY, + FAN_CONTROL_TABLE +}; + +#define PPSMC_Result_OK ((uint16_t)0x01) +#define PPSMC_Result_NoMore ((uint16_t)0x02) +#define PPSMC_Result_NotNow ((uint16_t)0x03) +#define PPSMC_Result_Failed ((uint16_t)0xFF) +#define PPSMC_Result_UnknownCmd ((uint16_t)0xFE) +#define PPSMC_Result_UnknownVT ((uint16_t)0xFD) + +typedef uint16_t 
PPSMC_Result; + +#define PPSMC_isERROR(x) ((uint16_t)0x80 & (x)) + +#define PPSMC_MSG_Halt ((uint16_t)0x10) +#define PPSMC_MSG_Resume ((uint16_t)0x11) +#define PPSMC_MSG_EnableDPMLevel ((uint16_t)0x12) +#define PPSMC_MSG_ZeroLevelsDisabled ((uint16_t)0x13) +#define PPSMC_MSG_OneLevelsDisabled ((uint16_t)0x14) +#define PPSMC_MSG_TwoLevelsDisabled ((uint16_t)0x15) +#define PPSMC_MSG_EnableThermalInterrupt ((uint16_t)0x16) +#define PPSMC_MSG_RunningOnAC ((uint16_t)0x17) +#define PPSMC_MSG_LevelUp ((uint16_t)0x18) +#define PPSMC_MSG_LevelDown ((uint16_t)0x19) +#define PPSMC_MSG_ResetDPMCounters ((uint16_t)0x1a) +#define PPSMC_MSG_SwitchToSwState ((uint16_t)0x20) +#define PPSMC_MSG_SwitchToSwStateLast ((uint16_t)0x3f) +#define PPSMC_MSG_SwitchToInitialState ((uint16_t)0x40) +#define PPSMC_MSG_NoForcedLevel ((uint16_t)0x41) +#define PPSMC_MSG_ForceHigh ((uint16_t)0x42) +#define PPSMC_MSG_ForceMediumOrHigh ((uint16_t)0x43) +#define PPSMC_MSG_SwitchToMinimumPower ((uint16_t)0x51) +#define PPSMC_MSG_ResumeFromMinimumPower ((uint16_t)0x52) +#define PPSMC_MSG_EnableCac ((uint16_t)0x53) +#define PPSMC_MSG_DisableCac ((uint16_t)0x54) +#define PPSMC_DPMStateHistoryStart ((uint16_t)0x55) +#define PPSMC_DPMStateHistoryStop ((uint16_t)0x56) +#define PPSMC_CACHistoryStart ((uint16_t)0x57) +#define PPSMC_CACHistoryStop ((uint16_t)0x58) +#define PPSMC_TDPClampingActive ((uint16_t)0x59) +#define PPSMC_TDPClampingInactive ((uint16_t)0x5A) +#define PPSMC_StartFanControl ((uint16_t)0x5B) +#define PPSMC_StopFanControl ((uint16_t)0x5C) +#define PPSMC_NoDisplay ((uint16_t)0x5D) +#define PPSMC_HasDisplay ((uint16_t)0x5E) +#define PPSMC_MSG_UVDPowerOFF ((uint16_t)0x60) +#define PPSMC_MSG_UVDPowerON ((uint16_t)0x61) +#define PPSMC_MSG_EnableULV ((uint16_t)0x62) +#define PPSMC_MSG_DisableULV ((uint16_t)0x63) +#define PPSMC_MSG_EnterULV ((uint16_t)0x64) +#define PPSMC_MSG_ExitULV ((uint16_t)0x65) +#define PPSMC_PowerShiftActive ((uint16_t)0x6A) +#define PPSMC_PowerShiftInactive ((uint16_t)0x6B) +#define PPSMC_OCPActive ((uint16_t)0x6C) +#define PPSMC_OCPInactive ((uint16_t)0x6D) +#define PPSMC_CACLongTermAvgEnable ((uint16_t)0x6E) +#define PPSMC_CACLongTermAvgDisable ((uint16_t)0x6F) +#define PPSMC_MSG_InferredStateSweep_Start ((uint16_t)0x70) +#define PPSMC_MSG_InferredStateSweep_Stop ((uint16_t)0x71) +#define PPSMC_MSG_SwitchToLowestInfState ((uint16_t)0x72) +#define PPSMC_MSG_SwitchToNonInfState ((uint16_t)0x73) +#define PPSMC_MSG_AllStateSweep_Start ((uint16_t)0x74) +#define PPSMC_MSG_AllStateSweep_Stop ((uint16_t)0x75) +#define PPSMC_MSG_SwitchNextLowerInfState ((uint16_t)0x76) +#define PPSMC_MSG_SwitchNextHigherInfState ((uint16_t)0x77) +#define PPSMC_MSG_MclkRetrainingTest ((uint16_t)0x78) +#define PPSMC_MSG_ForceTDPClamping ((uint16_t)0x79) +#define PPSMC_MSG_CollectCAC_PowerCorreln ((uint16_t)0x7A) +#define PPSMC_MSG_CollectCAC_WeightCalib ((uint16_t)0x7B) +#define PPSMC_MSG_CollectCAC_SQonly ((uint16_t)0x7C) +#define PPSMC_MSG_CollectCAC_TemperaturePwr ((uint16_t)0x7D) +#define PPSMC_MSG_ExtremitiesTest_Start ((uint16_t)0x7E) +#define PPSMC_MSG_ExtremitiesTest_Stop ((uint16_t)0x7F) +#define PPSMC_FlushDataCache ((uint16_t)0x80) +#define PPSMC_FlushInstrCache ((uint16_t)0x81) +#define PPSMC_MSG_SetEnabledLevels ((uint16_t)0x82) +#define PPSMC_MSG_SetForcedLevels ((uint16_t)0x83) +#define PPSMC_MSG_ResetToDefaults ((uint16_t)0x84) +#define PPSMC_MSG_SetForcedLevelsAndJump ((uint16_t)0x85) +#define PPSMC_MSG_SetCACHistoryMode ((uint16_t)0x86) +#define PPSMC_MSG_EnableDTE ((uint16_t)0x87) +#define 
PPSMC_MSG_DisableDTE ((uint16_t)0x88) +#define PPSMC_MSG_SmcSpaceSetAddress ((uint16_t)0x89) +#define PPSMC_MSG_SmcSpaceWriteDWordInc ((uint16_t)0x8A) +#define PPSMC_MSG_SmcSpaceWriteWordInc ((uint16_t)0x8B) +#define PPSMC_MSG_SmcSpaceWriteByteInc ((uint16_t)0x8C) +#define PPSMC_MSG_ChangeNearTDPLimit ((uint16_t)0x90) +#define PPSMC_MSG_ChangeSafePowerLimit ((uint16_t)0x91) +#define PPSMC_MSG_DPMStateSweepStart ((uint16_t)0x92) +#define PPSMC_MSG_DPMStateSweepStop ((uint16_t)0x93) +#define PPSMC_MSG_OVRDDisableSCLKDS ((uint16_t)0x94) +#define PPSMC_MSG_CancelDisableOVRDSCLKDS ((uint16_t)0x95) +#define PPSMC_MSG_ThrottleOVRDSCLKDS ((uint16_t)0x96) +#define PPSMC_MSG_CancelThrottleOVRDSCLKDS ((uint16_t)0x97) +#define PPSMC_MSG_GPIO17 ((uint16_t)0x98) +#define PPSMC_MSG_API_SetSvi2Volt_Vddc ((uint16_t)0x99) +#define PPSMC_MSG_API_SetSvi2Volt_Vddci ((uint16_t)0x9A) +#define PPSMC_MSG_API_SetSvi2Volt_Mvdd ((uint16_t)0x9B) +#define PPSMC_MSG_API_GetSvi2Volt_Vddc ((uint16_t)0x9C) +#define PPSMC_MSG_API_GetSvi2Volt_Vddci ((uint16_t)0x9D) +#define PPSMC_MSG_API_GetSvi2Volt_Mvdd ((uint16_t)0x9E) + +#define PPSMC_MSG_BREAK ((uint16_t)0xF8) + +#define PPSMC_MSG_Test ((uint16_t)0x100) +#define PPSMC_MSG_DRV_DRAM_ADDR_HI ((uint16_t)0x250) +#define PPSMC_MSG_DRV_DRAM_ADDR_LO ((uint16_t)0x251) +#define PPSMC_MSG_SMU_DRAM_ADDR_HI ((uint16_t)0x252) +#define PPSMC_MSG_SMU_DRAM_ADDR_LO ((uint16_t)0x253) +#define PPSMC_MSG_LoadUcodes ((uint16_t)0x254) + +typedef uint16_t PPSMC_Msg; + +#define PPSMC_EVENT_STATUS_THERMAL 0x00000001 +#define PPSMC_EVENT_STATUS_REGULATORHOT 0x00000002 +#define PPSMC_EVENT_STATUS_DC 0x00000004 +#define PPSMC_EVENT_STATUS_GPIO17 0x00000008 + +#pragma pack(pop) + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_sdma_pkt_open.h b/drivers/gpu/drm/amd/amdgpu/tonga_sdma_pkt_open.h new file mode 100644 index 000000000000..099b7b56113c --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/tonga_sdma_pkt_open.h @@ -0,0 +1,2240 @@ +/* + * Copyright (C) 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __TONGA_SDMA_PKT_OPEN_H_ +#define __TONGA_SDMA_PKT_OPEN_H_ + +#define SDMA_OP_NOP 0 +#define SDMA_OP_COPY 1 +#define SDMA_OP_WRITE 2 +#define SDMA_OP_INDIRECT 4 +#define SDMA_OP_FENCE 5 +#define SDMA_OP_TRAP 6 +#define SDMA_OP_SEM 7 +#define SDMA_OP_POLL_REGMEM 8 +#define SDMA_OP_COND_EXE 9 +#define SDMA_OP_ATOMIC 10 +#define SDMA_OP_CONST_FILL 11 +#define SDMA_OP_GEN_PTEPDE 12 +#define SDMA_OP_TIMESTAMP 13 +#define SDMA_OP_SRBM_WRITE 14 +#define SDMA_OP_PRE_EXE 15 +#define SDMA_SUBOP_TIMESTAMP_SET 0 +#define SDMA_SUBOP_TIMESTAMP_GET 1 +#define SDMA_SUBOP_TIMESTAMP_GET_GLOBAL 2 +#define SDMA_SUBOP_COPY_LINEAR 0 +#define SDMA_SUBOP_COPY_LINEAR_SUB_WIND 4 +#define SDMA_SUBOP_COPY_TILED 1 +#define SDMA_SUBOP_COPY_TILED_SUB_WIND 5 +#define SDMA_SUBOP_COPY_T2T_SUB_WIND 6 +#define SDMA_SUBOP_COPY_SOA 3 +#define SDMA_SUBOP_WRITE_LINEAR 0 +#define SDMA_SUBOP_WRITE_TILED 1 + +/*define for op field*/ +#define SDMA_PKT_HEADER_op_offset 0 +#define SDMA_PKT_HEADER_op_mask 0x000000FF +#define SDMA_PKT_HEADER_op_shift 0 +#define SDMA_PKT_HEADER_OP(x) (((x) & SDMA_PKT_HEADER_op_mask) << SDMA_PKT_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_HEADER_sub_op_offset 0 +#define SDMA_PKT_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_HEADER_sub_op_shift 8 +#define SDMA_PKT_HEADER_SUB_OP(x) (((x) & SDMA_PKT_HEADER_sub_op_mask) << SDMA_PKT_HEADER_sub_op_shift) + +/* +** Definitions for SDMA_PKT_COPY_LINEAR packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_COPY_LINEAR_HEADER_op_offset 0 +#define SDMA_PKT_COPY_LINEAR_HEADER_op_mask 0x000000FF +#define SDMA_PKT_COPY_LINEAR_HEADER_op_shift 0 +#define SDMA_PKT_COPY_LINEAR_HEADER_OP(x) (((x) & SDMA_PKT_COPY_LINEAR_HEADER_op_mask) << SDMA_PKT_COPY_LINEAR_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_COPY_LINEAR_HEADER_sub_op_offset 0 +#define SDMA_PKT_COPY_LINEAR_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_COPY_LINEAR_HEADER_sub_op_shift 8 +#define SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_LINEAR_HEADER_sub_op_mask) << SDMA_PKT_COPY_LINEAR_HEADER_sub_op_shift) + +/*define for broadcast field*/ +#define SDMA_PKT_COPY_LINEAR_HEADER_broadcast_offset 0 +#define SDMA_PKT_COPY_LINEAR_HEADER_broadcast_mask 0x00000001 +#define SDMA_PKT_COPY_LINEAR_HEADER_broadcast_shift 27 +#define SDMA_PKT_COPY_LINEAR_HEADER_BROADCAST(x) (((x) & SDMA_PKT_COPY_LINEAR_HEADER_broadcast_mask) << SDMA_PKT_COPY_LINEAR_HEADER_broadcast_shift) + +/*define for COUNT word*/ +/*define for count field*/ +#define SDMA_PKT_COPY_LINEAR_COUNT_count_offset 1 +#define SDMA_PKT_COPY_LINEAR_COUNT_count_mask 0x003FFFFF +#define SDMA_PKT_COPY_LINEAR_COUNT_count_shift 0 +#define SDMA_PKT_COPY_LINEAR_COUNT_COUNT(x) (((x) & SDMA_PKT_COPY_LINEAR_COUNT_count_mask) << SDMA_PKT_COPY_LINEAR_COUNT_count_shift) + +/*define for PARAMETER word*/ +/*define for dst_sw field*/ +#define SDMA_PKT_COPY_LINEAR_PARAMETER_dst_sw_offset 2 +#define SDMA_PKT_COPY_LINEAR_PARAMETER_dst_sw_mask 0x00000003 +#define SDMA_PKT_COPY_LINEAR_PARAMETER_dst_sw_shift 16 +#define SDMA_PKT_COPY_LINEAR_PARAMETER_DST_SW(x) (((x) & SDMA_PKT_COPY_LINEAR_PARAMETER_dst_sw_mask) << SDMA_PKT_COPY_LINEAR_PARAMETER_dst_sw_shift) + +/*define for dst_ha field*/ +#define SDMA_PKT_COPY_LINEAR_PARAMETER_dst_ha_offset 2 +#define SDMA_PKT_COPY_LINEAR_PARAMETER_dst_ha_mask 0x00000001 +#define SDMA_PKT_COPY_LINEAR_PARAMETER_dst_ha_shift 22 +#define SDMA_PKT_COPY_LINEAR_PARAMETER_DST_HA(x) (((x) & SDMA_PKT_COPY_LINEAR_PARAMETER_dst_ha_mask) << 
SDMA_PKT_COPY_LINEAR_PARAMETER_dst_ha_shift) + +/*define for src_sw field*/ +#define SDMA_PKT_COPY_LINEAR_PARAMETER_src_sw_offset 2 +#define SDMA_PKT_COPY_LINEAR_PARAMETER_src_sw_mask 0x00000003 +#define SDMA_PKT_COPY_LINEAR_PARAMETER_src_sw_shift 24 +#define SDMA_PKT_COPY_LINEAR_PARAMETER_SRC_SW(x) (((x) & SDMA_PKT_COPY_LINEAR_PARAMETER_src_sw_mask) << SDMA_PKT_COPY_LINEAR_PARAMETER_src_sw_shift) + +/*define for src_ha field*/ +#define SDMA_PKT_COPY_LINEAR_PARAMETER_src_ha_offset 2 +#define SDMA_PKT_COPY_LINEAR_PARAMETER_src_ha_mask 0x00000001 +#define SDMA_PKT_COPY_LINEAR_PARAMETER_src_ha_shift 30 +#define SDMA_PKT_COPY_LINEAR_PARAMETER_SRC_HA(x) (((x) & SDMA_PKT_COPY_LINEAR_PARAMETER_src_ha_mask) << SDMA_PKT_COPY_LINEAR_PARAMETER_src_ha_shift) + +/*define for SRC_ADDR_LO word*/ +/*define for src_addr_31_0 field*/ +#define SDMA_PKT_COPY_LINEAR_SRC_ADDR_LO_src_addr_31_0_offset 3 +#define SDMA_PKT_COPY_LINEAR_SRC_ADDR_LO_src_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_LINEAR_SRC_ADDR_LO_src_addr_31_0_shift 0 +#define SDMA_PKT_COPY_LINEAR_SRC_ADDR_LO_SRC_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_LINEAR_SRC_ADDR_LO_src_addr_31_0_mask) << SDMA_PKT_COPY_LINEAR_SRC_ADDR_LO_src_addr_31_0_shift) + +/*define for SRC_ADDR_HI word*/ +/*define for src_addr_63_32 field*/ +#define SDMA_PKT_COPY_LINEAR_SRC_ADDR_HI_src_addr_63_32_offset 4 +#define SDMA_PKT_COPY_LINEAR_SRC_ADDR_HI_src_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_LINEAR_SRC_ADDR_HI_src_addr_63_32_shift 0 +#define SDMA_PKT_COPY_LINEAR_SRC_ADDR_HI_SRC_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_LINEAR_SRC_ADDR_HI_src_addr_63_32_mask) << SDMA_PKT_COPY_LINEAR_SRC_ADDR_HI_src_addr_63_32_shift) + +/*define for DST_ADDR_LO word*/ +/*define for dst_addr_31_0 field*/ +#define SDMA_PKT_COPY_LINEAR_DST_ADDR_LO_dst_addr_31_0_offset 5 +#define SDMA_PKT_COPY_LINEAR_DST_ADDR_LO_dst_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_LINEAR_DST_ADDR_LO_dst_addr_31_0_shift 0 +#define SDMA_PKT_COPY_LINEAR_DST_ADDR_LO_DST_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_LINEAR_DST_ADDR_LO_dst_addr_31_0_mask) << SDMA_PKT_COPY_LINEAR_DST_ADDR_LO_dst_addr_31_0_shift) + +/*define for DST_ADDR_HI word*/ +/*define for dst_addr_63_32 field*/ +#define SDMA_PKT_COPY_LINEAR_DST_ADDR_HI_dst_addr_63_32_offset 6 +#define SDMA_PKT_COPY_LINEAR_DST_ADDR_HI_dst_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_LINEAR_DST_ADDR_HI_dst_addr_63_32_shift 0 +#define SDMA_PKT_COPY_LINEAR_DST_ADDR_HI_DST_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_LINEAR_DST_ADDR_HI_dst_addr_63_32_mask) << SDMA_PKT_COPY_LINEAR_DST_ADDR_HI_dst_addr_63_32_shift) + + +/* +** Definitions for SDMA_PKT_COPY_BROADCAST_LINEAR packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_op_offset 0 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_op_mask 0x000000FF +#define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_op_shift 0 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_OP(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_op_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_sub_op_offset 0 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_sub_op_shift 8 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_sub_op_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_sub_op_shift) + +/*define for broadcast field*/ +#define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_broadcast_offset 0 +#define 
SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_broadcast_mask 0x00000001 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_broadcast_shift 27 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_BROADCAST(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_broadcast_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_broadcast_shift) + +/*define for COUNT word*/ +/*define for count field*/ +#define SDMA_PKT_COPY_BROADCAST_LINEAR_COUNT_count_offset 1 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_COUNT_count_mask 0x003FFFFF +#define SDMA_PKT_COPY_BROADCAST_LINEAR_COUNT_count_shift 0 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_COUNT_COUNT(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_COUNT_count_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_COUNT_count_shift) + +/*define for PARAMETER word*/ +/*define for dst2_sw field*/ +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_sw_offset 2 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_sw_mask 0x00000003 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_sw_shift 8 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_DST2_SW(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_sw_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_sw_shift) + +/*define for dst2_ha field*/ +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_ha_offset 2 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_ha_mask 0x00000001 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_ha_shift 14 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_DST2_HA(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_ha_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_ha_shift) + +/*define for dst1_sw field*/ +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_sw_offset 2 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_sw_mask 0x00000003 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_sw_shift 16 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_DST1_SW(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_sw_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_sw_shift) + +/*define for dst1_ha field*/ +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_ha_offset 2 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_ha_mask 0x00000001 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_ha_shift 22 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_DST1_HA(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_ha_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_ha_shift) + +/*define for src_sw field*/ +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_sw_offset 2 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_sw_mask 0x00000003 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_sw_shift 24 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_SRC_SW(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_sw_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_sw_shift) + +/*define for src_ha field*/ +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_ha_offset 2 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_ha_mask 0x00000001 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_ha_shift 30 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_SRC_HA(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_ha_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_ha_shift) + +/*define for SRC_ADDR_LO word*/ +/*define for src_addr_31_0 field*/ +#define SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_LO_src_addr_31_0_offset 3 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_LO_src_addr_31_0_mask 0xFFFFFFFF +#define 
SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_LO_src_addr_31_0_shift 0 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_LO_SRC_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_LO_src_addr_31_0_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_LO_src_addr_31_0_shift) + +/*define for SRC_ADDR_HI word*/ +/*define for src_addr_63_32 field*/ +#define SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_HI_src_addr_63_32_offset 4 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_HI_src_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_HI_src_addr_63_32_shift 0 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_HI_SRC_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_HI_src_addr_63_32_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_HI_src_addr_63_32_shift) + +/*define for DST1_ADDR_LO word*/ +/*define for dst1_addr_31_0 field*/ +#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_LO_dst1_addr_31_0_offset 5 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_LO_dst1_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_LO_dst1_addr_31_0_shift 0 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_LO_DST1_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_LO_dst1_addr_31_0_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_LO_dst1_addr_31_0_shift) + +/*define for DST1_ADDR_HI word*/ +/*define for dst1_addr_63_32 field*/ +#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_HI_dst1_addr_63_32_offset 6 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_HI_dst1_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_HI_dst1_addr_63_32_shift 0 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_HI_DST1_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_HI_dst1_addr_63_32_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_HI_dst1_addr_63_32_shift) + +/*define for DST2_ADDR_LO word*/ +/*define for dst2_addr_31_0 field*/ +#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_LO_dst2_addr_31_0_offset 7 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_LO_dst2_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_LO_dst2_addr_31_0_shift 0 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_LO_DST2_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_LO_dst2_addr_31_0_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_LO_dst2_addr_31_0_shift) + +/*define for DST2_ADDR_HI word*/ +/*define for dst2_addr_63_32 field*/ +#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_HI_dst2_addr_63_32_offset 8 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_HI_dst2_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_HI_dst2_addr_63_32_shift 0 +#define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_HI_DST2_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_HI_dst2_addr_63_32_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_HI_dst2_addr_63_32_shift) + + +/* +** Definitions for SDMA_PKT_COPY_LINEAR_SUBWIN packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_op_offset 0 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_op_mask 0x000000FF +#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_op_shift 0 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_OP(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_op_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_sub_op_offset 0 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_sub_op_shift 
8 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_sub_op_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_sub_op_shift) + +/*define for elementsize field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_elementsize_offset 0 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_elementsize_mask 0x00000007 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_elementsize_shift 29 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_ELEMENTSIZE(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_elementsize_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_elementsize_shift) + +/*define for SRC_ADDR_LO word*/ +/*define for src_addr_31_0 field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_LO_src_addr_31_0_offset 1 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_LO_src_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_LO_src_addr_31_0_shift 0 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_LO_SRC_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_LO_src_addr_31_0_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_LO_src_addr_31_0_shift) + +/*define for SRC_ADDR_HI word*/ +/*define for src_addr_63_32 field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_HI_src_addr_63_32_offset 2 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_HI_src_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_HI_src_addr_63_32_shift 0 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_HI_SRC_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_HI_src_addr_63_32_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_HI_src_addr_63_32_shift) + +/*define for DW_3 word*/ +/*define for src_x field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_x_offset 3 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_x_mask 0x00003FFF +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_x_shift 0 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_SRC_X(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_x_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_x_shift) + +/*define for src_y field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_y_offset 3 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_y_mask 0x00003FFF +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_y_shift 16 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_SRC_Y(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_y_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_y_shift) + +/*define for DW_4 word*/ +/*define for src_z field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_z_offset 4 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_z_mask 0x000007FF +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_z_shift 0 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_SRC_Z(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_z_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_z_shift) + +/*define for src_pitch field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_pitch_offset 4 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_pitch_mask 0x00003FFF +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_pitch_shift 16 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_SRC_PITCH(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_pitch_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_pitch_shift) + +/*define for DW_5 word*/ +/*define for src_slice_pitch field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_5_src_slice_pitch_offset 5 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_5_src_slice_pitch_mask 0x0FFFFFFF +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_5_src_slice_pitch_shift 0 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_5_SRC_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_5_src_slice_pitch_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_5_src_slice_pitch_shift) + 
+/*define for DST_ADDR_LO word*/ +/*define for dst_addr_31_0 field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_LO_dst_addr_31_0_offset 6 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_LO_dst_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_LO_dst_addr_31_0_shift 0 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_LO_DST_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_LO_dst_addr_31_0_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_LO_dst_addr_31_0_shift) + +/*define for DST_ADDR_HI word*/ +/*define for dst_addr_63_32 field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_HI_dst_addr_63_32_offset 7 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_HI_dst_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_HI_dst_addr_63_32_shift 0 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_HI_DST_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_HI_dst_addr_63_32_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_HI_dst_addr_63_32_shift) + +/*define for DW_8 word*/ +/*define for dst_x field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_x_offset 8 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_x_mask 0x00003FFF +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_x_shift 0 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_DST_X(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_x_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_x_shift) + +/*define for dst_y field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_y_offset 8 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_y_mask 0x00003FFF +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_y_shift 16 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_DST_Y(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_y_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_y_shift) + +/*define for DW_9 word*/ +/*define for dst_z field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_z_offset 9 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_z_mask 0x000007FF +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_z_shift 0 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_DST_Z(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_z_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_z_shift) + +/*define for dst_pitch field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_pitch_offset 9 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_pitch_mask 0x00003FFF +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_pitch_shift 16 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_DST_PITCH(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_pitch_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_pitch_shift) + +/*define for DW_10 word*/ +/*define for dst_slice_pitch field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_10_dst_slice_pitch_offset 10 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_10_dst_slice_pitch_mask 0x0FFFFFFF +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_10_dst_slice_pitch_shift 0 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_10_DST_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_10_dst_slice_pitch_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_10_dst_slice_pitch_shift) + +/*define for DW_11 word*/ +/*define for rect_x field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_x_offset 11 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_x_mask 0x00003FFF +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_x_shift 0 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_RECT_X(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_x_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_x_shift) + +/*define for rect_y field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_y_offset 11 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_y_mask 0x00003FFF +#define 
SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_y_shift 16 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_RECT_Y(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_y_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_y_shift) + +/*define for DW_12 word*/ +/*define for rect_z field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_rect_z_offset 12 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_rect_z_mask 0x000007FF +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_rect_z_shift 0 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_RECT_Z(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_rect_z_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_rect_z_shift) + +/*define for dst_sw field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_sw_offset 12 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_sw_mask 0x00000003 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_sw_shift 16 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_DST_SW(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_sw_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_sw_shift) + +/*define for dst_ha field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_ha_offset 12 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_ha_mask 0x00000001 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_ha_shift 22 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_DST_HA(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_ha_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_ha_shift) + +/*define for src_sw field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_sw_offset 12 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_sw_mask 0x00000003 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_sw_shift 24 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_SRC_SW(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_sw_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_sw_shift) + +/*define for src_ha field*/ +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_ha_offset 12 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_ha_mask 0x00000001 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_ha_shift 30 +#define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_SRC_HA(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_ha_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_ha_shift) + + +/* +** Definitions for SDMA_PKT_COPY_TILED packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_COPY_TILED_HEADER_op_offset 0 +#define SDMA_PKT_COPY_TILED_HEADER_op_mask 0x000000FF +#define SDMA_PKT_COPY_TILED_HEADER_op_shift 0 +#define SDMA_PKT_COPY_TILED_HEADER_OP(x) (((x) & SDMA_PKT_COPY_TILED_HEADER_op_mask) << SDMA_PKT_COPY_TILED_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_COPY_TILED_HEADER_sub_op_offset 0 +#define SDMA_PKT_COPY_TILED_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_COPY_TILED_HEADER_sub_op_shift 8 +#define SDMA_PKT_COPY_TILED_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_TILED_HEADER_sub_op_mask) << SDMA_PKT_COPY_TILED_HEADER_sub_op_shift) + +/*define for detile field*/ +#define SDMA_PKT_COPY_TILED_HEADER_detile_offset 0 +#define SDMA_PKT_COPY_TILED_HEADER_detile_mask 0x00000001 +#define SDMA_PKT_COPY_TILED_HEADER_detile_shift 31 +#define SDMA_PKT_COPY_TILED_HEADER_DETILE(x) (((x) & SDMA_PKT_COPY_TILED_HEADER_detile_mask) << SDMA_PKT_COPY_TILED_HEADER_detile_shift) + +/*define for TILED_ADDR_LO word*/ +/*define for tiled_addr_31_0 field*/ +#define SDMA_PKT_COPY_TILED_TILED_ADDR_LO_tiled_addr_31_0_offset 1 +#define SDMA_PKT_COPY_TILED_TILED_ADDR_LO_tiled_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_TILED_TILED_ADDR_LO_tiled_addr_31_0_shift 0 +#define SDMA_PKT_COPY_TILED_TILED_ADDR_LO_TILED_ADDR_31_0(x) (((x) & 
SDMA_PKT_COPY_TILED_TILED_ADDR_LO_tiled_addr_31_0_mask) << SDMA_PKT_COPY_TILED_TILED_ADDR_LO_tiled_addr_31_0_shift) + +/*define for TILED_ADDR_HI word*/ +/*define for tiled_addr_63_32 field*/ +#define SDMA_PKT_COPY_TILED_TILED_ADDR_HI_tiled_addr_63_32_offset 2 +#define SDMA_PKT_COPY_TILED_TILED_ADDR_HI_tiled_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_TILED_TILED_ADDR_HI_tiled_addr_63_32_shift 0 +#define SDMA_PKT_COPY_TILED_TILED_ADDR_HI_TILED_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_TILED_TILED_ADDR_HI_tiled_addr_63_32_mask) << SDMA_PKT_COPY_TILED_TILED_ADDR_HI_tiled_addr_63_32_shift) + +/*define for DW_3 word*/ +/*define for pitch_in_tile field*/ +#define SDMA_PKT_COPY_TILED_DW_3_pitch_in_tile_offset 3 +#define SDMA_PKT_COPY_TILED_DW_3_pitch_in_tile_mask 0x000007FF +#define SDMA_PKT_COPY_TILED_DW_3_pitch_in_tile_shift 0 +#define SDMA_PKT_COPY_TILED_DW_3_PITCH_IN_TILE(x) (((x) & SDMA_PKT_COPY_TILED_DW_3_pitch_in_tile_mask) << SDMA_PKT_COPY_TILED_DW_3_pitch_in_tile_shift) + +/*define for height field*/ +#define SDMA_PKT_COPY_TILED_DW_3_height_offset 3 +#define SDMA_PKT_COPY_TILED_DW_3_height_mask 0x00003FFF +#define SDMA_PKT_COPY_TILED_DW_3_height_shift 16 +#define SDMA_PKT_COPY_TILED_DW_3_HEIGHT(x) (((x) & SDMA_PKT_COPY_TILED_DW_3_height_mask) << SDMA_PKT_COPY_TILED_DW_3_height_shift) + +/*define for DW_4 word*/ +/*define for slice_pitch field*/ +#define SDMA_PKT_COPY_TILED_DW_4_slice_pitch_offset 4 +#define SDMA_PKT_COPY_TILED_DW_4_slice_pitch_mask 0x003FFFFF +#define SDMA_PKT_COPY_TILED_DW_4_slice_pitch_shift 0 +#define SDMA_PKT_COPY_TILED_DW_4_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_TILED_DW_4_slice_pitch_mask) << SDMA_PKT_COPY_TILED_DW_4_slice_pitch_shift) + +/*define for DW_5 word*/ +/*define for element_size field*/ +#define SDMA_PKT_COPY_TILED_DW_5_element_size_offset 5 +#define SDMA_PKT_COPY_TILED_DW_5_element_size_mask 0x00000007 +#define SDMA_PKT_COPY_TILED_DW_5_element_size_shift 0 +#define SDMA_PKT_COPY_TILED_DW_5_ELEMENT_SIZE(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_element_size_mask) << SDMA_PKT_COPY_TILED_DW_5_element_size_shift) + +/*define for array_mode field*/ +#define SDMA_PKT_COPY_TILED_DW_5_array_mode_offset 5 +#define SDMA_PKT_COPY_TILED_DW_5_array_mode_mask 0x0000000F +#define SDMA_PKT_COPY_TILED_DW_5_array_mode_shift 3 +#define SDMA_PKT_COPY_TILED_DW_5_ARRAY_MODE(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_array_mode_mask) << SDMA_PKT_COPY_TILED_DW_5_array_mode_shift) + +/*define for mit_mode field*/ +#define SDMA_PKT_COPY_TILED_DW_5_mit_mode_offset 5 +#define SDMA_PKT_COPY_TILED_DW_5_mit_mode_mask 0x00000007 +#define SDMA_PKT_COPY_TILED_DW_5_mit_mode_shift 8 +#define SDMA_PKT_COPY_TILED_DW_5_MIT_MODE(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_mit_mode_mask) << SDMA_PKT_COPY_TILED_DW_5_mit_mode_shift) + +/*define for tilesplit_size field*/ +#define SDMA_PKT_COPY_TILED_DW_5_tilesplit_size_offset 5 +#define SDMA_PKT_COPY_TILED_DW_5_tilesplit_size_mask 0x00000007 +#define SDMA_PKT_COPY_TILED_DW_5_tilesplit_size_shift 11 +#define SDMA_PKT_COPY_TILED_DW_5_TILESPLIT_SIZE(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_tilesplit_size_mask) << SDMA_PKT_COPY_TILED_DW_5_tilesplit_size_shift) + +/*define for bank_w field*/ +#define SDMA_PKT_COPY_TILED_DW_5_bank_w_offset 5 +#define SDMA_PKT_COPY_TILED_DW_5_bank_w_mask 0x00000003 +#define SDMA_PKT_COPY_TILED_DW_5_bank_w_shift 15 +#define SDMA_PKT_COPY_TILED_DW_5_BANK_W(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_bank_w_mask) << SDMA_PKT_COPY_TILED_DW_5_bank_w_shift) + +/*define for bank_h field*/ +#define SDMA_PKT_COPY_TILED_DW_5_bank_h_offset 5 +#define 
SDMA_PKT_COPY_TILED_DW_5_bank_h_mask 0x00000003 +#define SDMA_PKT_COPY_TILED_DW_5_bank_h_shift 18 +#define SDMA_PKT_COPY_TILED_DW_5_BANK_H(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_bank_h_mask) << SDMA_PKT_COPY_TILED_DW_5_bank_h_shift) + +/*define for num_bank field*/ +#define SDMA_PKT_COPY_TILED_DW_5_num_bank_offset 5 +#define SDMA_PKT_COPY_TILED_DW_5_num_bank_mask 0x00000003 +#define SDMA_PKT_COPY_TILED_DW_5_num_bank_shift 21 +#define SDMA_PKT_COPY_TILED_DW_5_NUM_BANK(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_num_bank_mask) << SDMA_PKT_COPY_TILED_DW_5_num_bank_shift) + +/*define for mat_aspt field*/ +#define SDMA_PKT_COPY_TILED_DW_5_mat_aspt_offset 5 +#define SDMA_PKT_COPY_TILED_DW_5_mat_aspt_mask 0x00000003 +#define SDMA_PKT_COPY_TILED_DW_5_mat_aspt_shift 24 +#define SDMA_PKT_COPY_TILED_DW_5_MAT_ASPT(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_mat_aspt_mask) << SDMA_PKT_COPY_TILED_DW_5_mat_aspt_shift) + +/*define for pipe_config field*/ +#define SDMA_PKT_COPY_TILED_DW_5_pipe_config_offset 5 +#define SDMA_PKT_COPY_TILED_DW_5_pipe_config_mask 0x0000001F +#define SDMA_PKT_COPY_TILED_DW_5_pipe_config_shift 26 +#define SDMA_PKT_COPY_TILED_DW_5_PIPE_CONFIG(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_pipe_config_mask) << SDMA_PKT_COPY_TILED_DW_5_pipe_config_shift) + +/*define for DW_6 word*/ +/*define for x field*/ +#define SDMA_PKT_COPY_TILED_DW_6_x_offset 6 +#define SDMA_PKT_COPY_TILED_DW_6_x_mask 0x00003FFF +#define SDMA_PKT_COPY_TILED_DW_6_x_shift 0 +#define SDMA_PKT_COPY_TILED_DW_6_X(x) (((x) & SDMA_PKT_COPY_TILED_DW_6_x_mask) << SDMA_PKT_COPY_TILED_DW_6_x_shift) + +/*define for y field*/ +#define SDMA_PKT_COPY_TILED_DW_6_y_offset 6 +#define SDMA_PKT_COPY_TILED_DW_6_y_mask 0x00003FFF +#define SDMA_PKT_COPY_TILED_DW_6_y_shift 16 +#define SDMA_PKT_COPY_TILED_DW_6_Y(x) (((x) & SDMA_PKT_COPY_TILED_DW_6_y_mask) << SDMA_PKT_COPY_TILED_DW_6_y_shift) + +/*define for DW_7 word*/ +/*define for z field*/ +#define SDMA_PKT_COPY_TILED_DW_7_z_offset 7 +#define SDMA_PKT_COPY_TILED_DW_7_z_mask 0x00000FFF +#define SDMA_PKT_COPY_TILED_DW_7_z_shift 0 +#define SDMA_PKT_COPY_TILED_DW_7_Z(x) (((x) & SDMA_PKT_COPY_TILED_DW_7_z_mask) << SDMA_PKT_COPY_TILED_DW_7_z_shift) + +/*define for linear_sw field*/ +#define SDMA_PKT_COPY_TILED_DW_7_linear_sw_offset 7 +#define SDMA_PKT_COPY_TILED_DW_7_linear_sw_mask 0x00000003 +#define SDMA_PKT_COPY_TILED_DW_7_linear_sw_shift 16 +#define SDMA_PKT_COPY_TILED_DW_7_LINEAR_SW(x) (((x) & SDMA_PKT_COPY_TILED_DW_7_linear_sw_mask) << SDMA_PKT_COPY_TILED_DW_7_linear_sw_shift) + +/*define for tile_sw field*/ +#define SDMA_PKT_COPY_TILED_DW_7_tile_sw_offset 7 +#define SDMA_PKT_COPY_TILED_DW_7_tile_sw_mask 0x00000003 +#define SDMA_PKT_COPY_TILED_DW_7_tile_sw_shift 24 +#define SDMA_PKT_COPY_TILED_DW_7_TILE_SW(x) (((x) & SDMA_PKT_COPY_TILED_DW_7_tile_sw_mask) << SDMA_PKT_COPY_TILED_DW_7_tile_sw_shift) + +/*define for LINEAR_ADDR_LO word*/ +/*define for linear_addr_31_0 field*/ +#define SDMA_PKT_COPY_TILED_LINEAR_ADDR_LO_linear_addr_31_0_offset 8 +#define SDMA_PKT_COPY_TILED_LINEAR_ADDR_LO_linear_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_TILED_LINEAR_ADDR_LO_linear_addr_31_0_shift 0 +#define SDMA_PKT_COPY_TILED_LINEAR_ADDR_LO_LINEAR_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_TILED_LINEAR_ADDR_LO_linear_addr_31_0_mask) << SDMA_PKT_COPY_TILED_LINEAR_ADDR_LO_linear_addr_31_0_shift) + +/*define for LINEAR_ADDR_HI word*/ +/*define for linear_addr_63_32 field*/ +#define SDMA_PKT_COPY_TILED_LINEAR_ADDR_HI_linear_addr_63_32_offset 9 +#define SDMA_PKT_COPY_TILED_LINEAR_ADDR_HI_linear_addr_63_32_mask 0xFFFFFFFF +#define 
SDMA_PKT_COPY_TILED_LINEAR_ADDR_HI_linear_addr_63_32_shift 0 +#define SDMA_PKT_COPY_TILED_LINEAR_ADDR_HI_LINEAR_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_TILED_LINEAR_ADDR_HI_linear_addr_63_32_mask) << SDMA_PKT_COPY_TILED_LINEAR_ADDR_HI_linear_addr_63_32_shift) + +/*define for LINEAR_PITCH word*/ +/*define for linear_pitch field*/ +#define SDMA_PKT_COPY_TILED_LINEAR_PITCH_linear_pitch_offset 10 +#define SDMA_PKT_COPY_TILED_LINEAR_PITCH_linear_pitch_mask 0x0007FFFF +#define SDMA_PKT_COPY_TILED_LINEAR_PITCH_linear_pitch_shift 0 +#define SDMA_PKT_COPY_TILED_LINEAR_PITCH_LINEAR_PITCH(x) (((x) & SDMA_PKT_COPY_TILED_LINEAR_PITCH_linear_pitch_mask) << SDMA_PKT_COPY_TILED_LINEAR_PITCH_linear_pitch_shift) + +/*define for COUNT word*/ +/*define for count field*/ +#define SDMA_PKT_COPY_TILED_COUNT_count_offset 11 +#define SDMA_PKT_COPY_TILED_COUNT_count_mask 0x000FFFFF +#define SDMA_PKT_COPY_TILED_COUNT_count_shift 0 +#define SDMA_PKT_COPY_TILED_COUNT_COUNT(x) (((x) & SDMA_PKT_COPY_TILED_COUNT_count_mask) << SDMA_PKT_COPY_TILED_COUNT_count_shift) + + +/* +** Definitions for SDMA_PKT_COPY_L2T_BROADCAST packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_op_offset 0 +#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_op_mask 0x000000FF +#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_op_shift 0 +#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_OP(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_HEADER_op_mask) << SDMA_PKT_COPY_L2T_BROADCAST_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_sub_op_offset 0 +#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_sub_op_shift 8 +#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_HEADER_sub_op_mask) << SDMA_PKT_COPY_L2T_BROADCAST_HEADER_sub_op_shift) + +/*define for videocopy field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_videocopy_offset 0 +#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_videocopy_mask 0x00000001 +#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_videocopy_shift 26 +#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_VIDEOCOPY(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_HEADER_videocopy_mask) << SDMA_PKT_COPY_L2T_BROADCAST_HEADER_videocopy_shift) + +/*define for broadcast field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_broadcast_offset 0 +#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_broadcast_mask 0x00000001 +#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_broadcast_shift 27 +#define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_BROADCAST(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_HEADER_broadcast_mask) << SDMA_PKT_COPY_L2T_BROADCAST_HEADER_broadcast_shift) + +/*define for TILED_ADDR_LO_0 word*/ +/*define for tiled_addr0_31_0 field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_0_tiled_addr0_31_0_offset 1 +#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_0_tiled_addr0_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_0_tiled_addr0_31_0_shift 0 +#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_0_TILED_ADDR0_31_0(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_0_tiled_addr0_31_0_mask) << SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_0_tiled_addr0_31_0_shift) + +/*define for TILED_ADDR_HI_0 word*/ +/*define for tiled_addr0_63_32 field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_0_tiled_addr0_63_32_offset 2 +#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_0_tiled_addr0_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_0_tiled_addr0_63_32_shift 0 +#define 
SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_0_TILED_ADDR0_63_32(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_0_tiled_addr0_63_32_mask) << SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_0_tiled_addr0_63_32_shift) + +/*define for TILED_ADDR_LO_1 word*/ +/*define for tiled_addr1_31_0 field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_1_tiled_addr1_31_0_offset 3 +#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_1_tiled_addr1_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_1_tiled_addr1_31_0_shift 0 +#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_1_TILED_ADDR1_31_0(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_1_tiled_addr1_31_0_mask) << SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_1_tiled_addr1_31_0_shift) + +/*define for TILED_ADDR_HI_1 word*/ +/*define for tiled_addr1_63_32 field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_1_tiled_addr1_63_32_offset 4 +#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_1_tiled_addr1_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_1_tiled_addr1_63_32_shift 0 +#define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_1_TILED_ADDR1_63_32(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_1_tiled_addr1_63_32_mask) << SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_1_tiled_addr1_63_32_shift) + +/*define for DW_5 word*/ +/*define for pitch_in_tile field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_pitch_in_tile_offset 5 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_pitch_in_tile_mask 0x000007FF +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_pitch_in_tile_shift 0 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_PITCH_IN_TILE(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_5_pitch_in_tile_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_5_pitch_in_tile_shift) + +/*define for height field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_height_offset 5 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_height_mask 0x00003FFF +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_height_shift 16 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_HEIGHT(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_5_height_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_5_height_shift) + +/*define for DW_6 word*/ +/*define for slice_pitch field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_6_slice_pitch_offset 6 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_6_slice_pitch_mask 0x003FFFFF +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_6_slice_pitch_shift 0 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_6_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_6_slice_pitch_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_6_slice_pitch_shift) + +/*define for DW_7 word*/ +/*define for element_size field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_element_size_offset 7 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_element_size_mask 0x00000007 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_element_size_shift 0 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_ELEMENT_SIZE(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_element_size_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_element_size_shift) + +/*define for array_mode field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_array_mode_offset 7 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_array_mode_mask 0x0000000F +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_array_mode_shift 3 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_ARRAY_MODE(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_array_mode_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_array_mode_shift) + +/*define for mit_mode field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mit_mode_offset 7 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mit_mode_mask 0x00000007 +#define 
SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mit_mode_shift 8 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_MIT_MODE(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mit_mode_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mit_mode_shift) + +/*define for tilesplit_size field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_tilesplit_size_offset 7 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_tilesplit_size_mask 0x00000007 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_tilesplit_size_shift 11 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_TILESPLIT_SIZE(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_tilesplit_size_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_tilesplit_size_shift) + +/*define for bank_w field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_w_offset 7 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_w_mask 0x00000003 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_w_shift 15 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_BANK_W(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_w_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_w_shift) + +/*define for bank_h field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_h_offset 7 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_h_mask 0x00000003 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_h_shift 18 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_BANK_H(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_h_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_h_shift) + +/*define for num_bank field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_num_bank_offset 7 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_num_bank_mask 0x00000003 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_num_bank_shift 21 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_NUM_BANK(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_num_bank_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_num_bank_shift) + +/*define for mat_aspt field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mat_aspt_offset 7 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mat_aspt_mask 0x00000003 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mat_aspt_shift 24 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_MAT_ASPT(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mat_aspt_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mat_aspt_shift) + +/*define for pipe_config field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_pipe_config_offset 7 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_pipe_config_mask 0x0000001F +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_pipe_config_shift 26 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_PIPE_CONFIG(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_pipe_config_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_pipe_config_shift) + +/*define for DW_8 word*/ +/*define for x field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_x_offset 8 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_x_mask 0x00003FFF +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_x_shift 0 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_X(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_8_x_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_8_x_shift) + +/*define for y field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_y_offset 8 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_y_mask 0x00003FFF +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_y_shift 16 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_Y(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_8_y_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_8_y_shift) + +/*define for DW_9 word*/ +/*define for z field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_9_z_offset 9 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_9_z_mask 0x00000FFF +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_9_z_shift 0 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_9_Z(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_9_z_mask) << 
SDMA_PKT_COPY_L2T_BROADCAST_DW_9_z_shift) + +/*define for DW_10 word*/ +/*define for dst2_sw field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_sw_offset 10 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_sw_mask 0x00000003 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_sw_shift 8 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_DST2_SW(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_sw_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_sw_shift) + +/*define for dst2_ha field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_ha_offset 10 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_ha_mask 0x00000001 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_ha_shift 14 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_DST2_HA(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_ha_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_ha_shift) + +/*define for linear_sw field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_linear_sw_offset 10 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_linear_sw_mask 0x00000003 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_linear_sw_shift 16 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_LINEAR_SW(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_10_linear_sw_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_10_linear_sw_shift) + +/*define for tile_sw field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_tile_sw_offset 10 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_tile_sw_mask 0x00000003 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_tile_sw_shift 24 +#define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_TILE_SW(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_10_tile_sw_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_10_tile_sw_shift) + +/*define for LINEAR_ADDR_LO word*/ +/*define for linear_addr_31_0 field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_LO_linear_addr_31_0_offset 11 +#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_LO_linear_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_LO_linear_addr_31_0_shift 0 +#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_LO_LINEAR_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_LO_linear_addr_31_0_mask) << SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_LO_linear_addr_31_0_shift) + +/*define for LINEAR_ADDR_HI word*/ +/*define for linear_addr_63_32 field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_HI_linear_addr_63_32_offset 12 +#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_HI_linear_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_HI_linear_addr_63_32_shift 0 +#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_HI_LINEAR_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_HI_linear_addr_63_32_mask) << SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_HI_linear_addr_63_32_shift) + +/*define for LINEAR_PITCH word*/ +/*define for linear_pitch field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_PITCH_linear_pitch_offset 13 +#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_PITCH_linear_pitch_mask 0x0007FFFF +#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_PITCH_linear_pitch_shift 0 +#define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_PITCH_LINEAR_PITCH(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_PITCH_linear_pitch_mask) << SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_PITCH_linear_pitch_shift) + +/*define for COUNT word*/ +/*define for count field*/ +#define SDMA_PKT_COPY_L2T_BROADCAST_COUNT_count_offset 14 +#define SDMA_PKT_COPY_L2T_BROADCAST_COUNT_count_mask 0x000FFFFF +#define SDMA_PKT_COPY_L2T_BROADCAST_COUNT_count_shift 0 +#define SDMA_PKT_COPY_L2T_BROADCAST_COUNT_COUNT(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_COUNT_count_mask) << 
SDMA_PKT_COPY_L2T_BROADCAST_COUNT_count_shift) + + +/* +** Definitions for SDMA_PKT_COPY_T2T packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_COPY_T2T_HEADER_op_offset 0 +#define SDMA_PKT_COPY_T2T_HEADER_op_mask 0x000000FF +#define SDMA_PKT_COPY_T2T_HEADER_op_shift 0 +#define SDMA_PKT_COPY_T2T_HEADER_OP(x) (((x) & SDMA_PKT_COPY_T2T_HEADER_op_mask) << SDMA_PKT_COPY_T2T_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_COPY_T2T_HEADER_sub_op_offset 0 +#define SDMA_PKT_COPY_T2T_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_COPY_T2T_HEADER_sub_op_shift 8 +#define SDMA_PKT_COPY_T2T_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_T2T_HEADER_sub_op_mask) << SDMA_PKT_COPY_T2T_HEADER_sub_op_shift) + +/*define for SRC_ADDR_LO word*/ +/*define for src_addr_31_0 field*/ +#define SDMA_PKT_COPY_T2T_SRC_ADDR_LO_src_addr_31_0_offset 1 +#define SDMA_PKT_COPY_T2T_SRC_ADDR_LO_src_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_T2T_SRC_ADDR_LO_src_addr_31_0_shift 0 +#define SDMA_PKT_COPY_T2T_SRC_ADDR_LO_SRC_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_T2T_SRC_ADDR_LO_src_addr_31_0_mask) << SDMA_PKT_COPY_T2T_SRC_ADDR_LO_src_addr_31_0_shift) + +/*define for SRC_ADDR_HI word*/ +/*define for src_addr_63_32 field*/ +#define SDMA_PKT_COPY_T2T_SRC_ADDR_HI_src_addr_63_32_offset 2 +#define SDMA_PKT_COPY_T2T_SRC_ADDR_HI_src_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_T2T_SRC_ADDR_HI_src_addr_63_32_shift 0 +#define SDMA_PKT_COPY_T2T_SRC_ADDR_HI_SRC_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_T2T_SRC_ADDR_HI_src_addr_63_32_mask) << SDMA_PKT_COPY_T2T_SRC_ADDR_HI_src_addr_63_32_shift) + +/*define for DW_3 word*/ +/*define for src_x field*/ +#define SDMA_PKT_COPY_T2T_DW_3_src_x_offset 3 +#define SDMA_PKT_COPY_T2T_DW_3_src_x_mask 0x00003FFF +#define SDMA_PKT_COPY_T2T_DW_3_src_x_shift 0 +#define SDMA_PKT_COPY_T2T_DW_3_SRC_X(x) (((x) & SDMA_PKT_COPY_T2T_DW_3_src_x_mask) << SDMA_PKT_COPY_T2T_DW_3_src_x_shift) + +/*define for src_y field*/ +#define SDMA_PKT_COPY_T2T_DW_3_src_y_offset 3 +#define SDMA_PKT_COPY_T2T_DW_3_src_y_mask 0x00003FFF +#define SDMA_PKT_COPY_T2T_DW_3_src_y_shift 16 +#define SDMA_PKT_COPY_T2T_DW_3_SRC_Y(x) (((x) & SDMA_PKT_COPY_T2T_DW_3_src_y_mask) << SDMA_PKT_COPY_T2T_DW_3_src_y_shift) + +/*define for DW_4 word*/ +/*define for src_z field*/ +#define SDMA_PKT_COPY_T2T_DW_4_src_z_offset 4 +#define SDMA_PKT_COPY_T2T_DW_4_src_z_mask 0x000007FF +#define SDMA_PKT_COPY_T2T_DW_4_src_z_shift 0 +#define SDMA_PKT_COPY_T2T_DW_4_SRC_Z(x) (((x) & SDMA_PKT_COPY_T2T_DW_4_src_z_mask) << SDMA_PKT_COPY_T2T_DW_4_src_z_shift) + +/*define for src_pitch_in_tile field*/ +#define SDMA_PKT_COPY_T2T_DW_4_src_pitch_in_tile_offset 4 +#define SDMA_PKT_COPY_T2T_DW_4_src_pitch_in_tile_mask 0x00000FFF +#define SDMA_PKT_COPY_T2T_DW_4_src_pitch_in_tile_shift 16 +#define SDMA_PKT_COPY_T2T_DW_4_SRC_PITCH_IN_TILE(x) (((x) & SDMA_PKT_COPY_T2T_DW_4_src_pitch_in_tile_mask) << SDMA_PKT_COPY_T2T_DW_4_src_pitch_in_tile_shift) + +/*define for DW_5 word*/ +/*define for src_slice_pitch field*/ +#define SDMA_PKT_COPY_T2T_DW_5_src_slice_pitch_offset 5 +#define SDMA_PKT_COPY_T2T_DW_5_src_slice_pitch_mask 0x003FFFFF +#define SDMA_PKT_COPY_T2T_DW_5_src_slice_pitch_shift 0 +#define SDMA_PKT_COPY_T2T_DW_5_SRC_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_T2T_DW_5_src_slice_pitch_mask) << SDMA_PKT_COPY_T2T_DW_5_src_slice_pitch_shift) + +/*define for DW_6 word*/ +/*define for src_element_size field*/ +#define SDMA_PKT_COPY_T2T_DW_6_src_element_size_offset 6 +#define SDMA_PKT_COPY_T2T_DW_6_src_element_size_mask 0x00000007 +#define 
SDMA_PKT_COPY_T2T_DW_6_src_element_size_shift 0 +#define SDMA_PKT_COPY_T2T_DW_6_SRC_ELEMENT_SIZE(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_element_size_mask) << SDMA_PKT_COPY_T2T_DW_6_src_element_size_shift) + +/*define for src_array_mode field*/ +#define SDMA_PKT_COPY_T2T_DW_6_src_array_mode_offset 6 +#define SDMA_PKT_COPY_T2T_DW_6_src_array_mode_mask 0x0000000F +#define SDMA_PKT_COPY_T2T_DW_6_src_array_mode_shift 3 +#define SDMA_PKT_COPY_T2T_DW_6_SRC_ARRAY_MODE(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_array_mode_mask) << SDMA_PKT_COPY_T2T_DW_6_src_array_mode_shift) + +/*define for src_mit_mode field*/ +#define SDMA_PKT_COPY_T2T_DW_6_src_mit_mode_offset 6 +#define SDMA_PKT_COPY_T2T_DW_6_src_mit_mode_mask 0x00000007 +#define SDMA_PKT_COPY_T2T_DW_6_src_mit_mode_shift 8 +#define SDMA_PKT_COPY_T2T_DW_6_SRC_MIT_MODE(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_mit_mode_mask) << SDMA_PKT_COPY_T2T_DW_6_src_mit_mode_shift) + +/*define for src_tilesplit_size field*/ +#define SDMA_PKT_COPY_T2T_DW_6_src_tilesplit_size_offset 6 +#define SDMA_PKT_COPY_T2T_DW_6_src_tilesplit_size_mask 0x00000007 +#define SDMA_PKT_COPY_T2T_DW_6_src_tilesplit_size_shift 11 +#define SDMA_PKT_COPY_T2T_DW_6_SRC_TILESPLIT_SIZE(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_tilesplit_size_mask) << SDMA_PKT_COPY_T2T_DW_6_src_tilesplit_size_shift) + +/*define for src_bank_w field*/ +#define SDMA_PKT_COPY_T2T_DW_6_src_bank_w_offset 6 +#define SDMA_PKT_COPY_T2T_DW_6_src_bank_w_mask 0x00000003 +#define SDMA_PKT_COPY_T2T_DW_6_src_bank_w_shift 15 +#define SDMA_PKT_COPY_T2T_DW_6_SRC_BANK_W(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_bank_w_mask) << SDMA_PKT_COPY_T2T_DW_6_src_bank_w_shift) + +/*define for src_bank_h field*/ +#define SDMA_PKT_COPY_T2T_DW_6_src_bank_h_offset 6 +#define SDMA_PKT_COPY_T2T_DW_6_src_bank_h_mask 0x00000003 +#define SDMA_PKT_COPY_T2T_DW_6_src_bank_h_shift 18 +#define SDMA_PKT_COPY_T2T_DW_6_SRC_BANK_H(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_bank_h_mask) << SDMA_PKT_COPY_T2T_DW_6_src_bank_h_shift) + +/*define for src_num_bank field*/ +#define SDMA_PKT_COPY_T2T_DW_6_src_num_bank_offset 6 +#define SDMA_PKT_COPY_T2T_DW_6_src_num_bank_mask 0x00000003 +#define SDMA_PKT_COPY_T2T_DW_6_src_num_bank_shift 21 +#define SDMA_PKT_COPY_T2T_DW_6_SRC_NUM_BANK(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_num_bank_mask) << SDMA_PKT_COPY_T2T_DW_6_src_num_bank_shift) + +/*define for src_mat_aspt field*/ +#define SDMA_PKT_COPY_T2T_DW_6_src_mat_aspt_offset 6 +#define SDMA_PKT_COPY_T2T_DW_6_src_mat_aspt_mask 0x00000003 +#define SDMA_PKT_COPY_T2T_DW_6_src_mat_aspt_shift 24 +#define SDMA_PKT_COPY_T2T_DW_6_SRC_MAT_ASPT(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_mat_aspt_mask) << SDMA_PKT_COPY_T2T_DW_6_src_mat_aspt_shift) + +/*define for src_pipe_config field*/ +#define SDMA_PKT_COPY_T2T_DW_6_src_pipe_config_offset 6 +#define SDMA_PKT_COPY_T2T_DW_6_src_pipe_config_mask 0x0000001F +#define SDMA_PKT_COPY_T2T_DW_6_src_pipe_config_shift 26 +#define SDMA_PKT_COPY_T2T_DW_6_SRC_PIPE_CONFIG(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_pipe_config_mask) << SDMA_PKT_COPY_T2T_DW_6_src_pipe_config_shift) + +/*define for DST_ADDR_LO word*/ +/*define for dst_addr_31_0 field*/ +#define SDMA_PKT_COPY_T2T_DST_ADDR_LO_dst_addr_31_0_offset 7 +#define SDMA_PKT_COPY_T2T_DST_ADDR_LO_dst_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_T2T_DST_ADDR_LO_dst_addr_31_0_shift 0 +#define SDMA_PKT_COPY_T2T_DST_ADDR_LO_DST_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_T2T_DST_ADDR_LO_dst_addr_31_0_mask) << SDMA_PKT_COPY_T2T_DST_ADDR_LO_dst_addr_31_0_shift) + +/*define for DST_ADDR_HI word*/ +/*define for dst_addr_63_32 field*/ 
+#define SDMA_PKT_COPY_T2T_DST_ADDR_HI_dst_addr_63_32_offset 8 +#define SDMA_PKT_COPY_T2T_DST_ADDR_HI_dst_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_T2T_DST_ADDR_HI_dst_addr_63_32_shift 0 +#define SDMA_PKT_COPY_T2T_DST_ADDR_HI_DST_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_T2T_DST_ADDR_HI_dst_addr_63_32_mask) << SDMA_PKT_COPY_T2T_DST_ADDR_HI_dst_addr_63_32_shift) + +/*define for DW_9 word*/ +/*define for dst_x field*/ +#define SDMA_PKT_COPY_T2T_DW_9_dst_x_offset 9 +#define SDMA_PKT_COPY_T2T_DW_9_dst_x_mask 0x00003FFF +#define SDMA_PKT_COPY_T2T_DW_9_dst_x_shift 0 +#define SDMA_PKT_COPY_T2T_DW_9_DST_X(x) (((x) & SDMA_PKT_COPY_T2T_DW_9_dst_x_mask) << SDMA_PKT_COPY_T2T_DW_9_dst_x_shift) + +/*define for dst_y field*/ +#define SDMA_PKT_COPY_T2T_DW_9_dst_y_offset 9 +#define SDMA_PKT_COPY_T2T_DW_9_dst_y_mask 0x00003FFF +#define SDMA_PKT_COPY_T2T_DW_9_dst_y_shift 16 +#define SDMA_PKT_COPY_T2T_DW_9_DST_Y(x) (((x) & SDMA_PKT_COPY_T2T_DW_9_dst_y_mask) << SDMA_PKT_COPY_T2T_DW_9_dst_y_shift) + +/*define for DW_10 word*/ +/*define for dst_z field*/ +#define SDMA_PKT_COPY_T2T_DW_10_dst_z_offset 10 +#define SDMA_PKT_COPY_T2T_DW_10_dst_z_mask 0x000007FF +#define SDMA_PKT_COPY_T2T_DW_10_dst_z_shift 0 +#define SDMA_PKT_COPY_T2T_DW_10_DST_Z(x) (((x) & SDMA_PKT_COPY_T2T_DW_10_dst_z_mask) << SDMA_PKT_COPY_T2T_DW_10_dst_z_shift) + +/*define for dst_pitch_in_tile field*/ +#define SDMA_PKT_COPY_T2T_DW_10_dst_pitch_in_tile_offset 10 +#define SDMA_PKT_COPY_T2T_DW_10_dst_pitch_in_tile_mask 0x00000FFF +#define SDMA_PKT_COPY_T2T_DW_10_dst_pitch_in_tile_shift 16 +#define SDMA_PKT_COPY_T2T_DW_10_DST_PITCH_IN_TILE(x) (((x) & SDMA_PKT_COPY_T2T_DW_10_dst_pitch_in_tile_mask) << SDMA_PKT_COPY_T2T_DW_10_dst_pitch_in_tile_shift) + +/*define for DW_11 word*/ +/*define for dst_slice_pitch field*/ +#define SDMA_PKT_COPY_T2T_DW_11_dst_slice_pitch_offset 11 +#define SDMA_PKT_COPY_T2T_DW_11_dst_slice_pitch_mask 0x003FFFFF +#define SDMA_PKT_COPY_T2T_DW_11_dst_slice_pitch_shift 0 +#define SDMA_PKT_COPY_T2T_DW_11_DST_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_T2T_DW_11_dst_slice_pitch_mask) << SDMA_PKT_COPY_T2T_DW_11_dst_slice_pitch_shift) + +/*define for DW_12 word*/ +/*define for dst_array_mode field*/ +#define SDMA_PKT_COPY_T2T_DW_12_dst_array_mode_offset 12 +#define SDMA_PKT_COPY_T2T_DW_12_dst_array_mode_mask 0x0000000F +#define SDMA_PKT_COPY_T2T_DW_12_dst_array_mode_shift 3 +#define SDMA_PKT_COPY_T2T_DW_12_DST_ARRAY_MODE(x) (((x) & SDMA_PKT_COPY_T2T_DW_12_dst_array_mode_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_array_mode_shift) + +/*define for dst_mit_mode field*/ +#define SDMA_PKT_COPY_T2T_DW_12_dst_mit_mode_offset 12 +#define SDMA_PKT_COPY_T2T_DW_12_dst_mit_mode_mask 0x00000007 +#define SDMA_PKT_COPY_T2T_DW_12_dst_mit_mode_shift 8 +#define SDMA_PKT_COPY_T2T_DW_12_DST_MIT_MODE(x) (((x) & SDMA_PKT_COPY_T2T_DW_12_dst_mit_mode_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_mit_mode_shift) + +/*define for dst_tilesplit_size field*/ +#define SDMA_PKT_COPY_T2T_DW_12_dst_tilesplit_size_offset 12 +#define SDMA_PKT_COPY_T2T_DW_12_dst_tilesplit_size_mask 0x00000007 +#define SDMA_PKT_COPY_T2T_DW_12_dst_tilesplit_size_shift 11 +#define SDMA_PKT_COPY_T2T_DW_12_DST_TILESPLIT_SIZE(x) (((x) & SDMA_PKT_COPY_T2T_DW_12_dst_tilesplit_size_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_tilesplit_size_shift) + +/*define for dst_bank_w field*/ +#define SDMA_PKT_COPY_T2T_DW_12_dst_bank_w_offset 12 +#define SDMA_PKT_COPY_T2T_DW_12_dst_bank_w_mask 0x00000003 +#define SDMA_PKT_COPY_T2T_DW_12_dst_bank_w_shift 15 +#define SDMA_PKT_COPY_T2T_DW_12_DST_BANK_W(x) (((x) & 
SDMA_PKT_COPY_T2T_DW_12_dst_bank_w_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_bank_w_shift) + +/*define for dst_bank_h field*/ +#define SDMA_PKT_COPY_T2T_DW_12_dst_bank_h_offset 12 +#define SDMA_PKT_COPY_T2T_DW_12_dst_bank_h_mask 0x00000003 +#define SDMA_PKT_COPY_T2T_DW_12_dst_bank_h_shift 18 +#define SDMA_PKT_COPY_T2T_DW_12_DST_BANK_H(x) (((x) & SDMA_PKT_COPY_T2T_DW_12_dst_bank_h_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_bank_h_shift) + +/*define for dst_num_bank field*/ +#define SDMA_PKT_COPY_T2T_DW_12_dst_num_bank_offset 12 +#define SDMA_PKT_COPY_T2T_DW_12_dst_num_bank_mask 0x00000003 +#define SDMA_PKT_COPY_T2T_DW_12_dst_num_bank_shift 21 +#define SDMA_PKT_COPY_T2T_DW_12_DST_NUM_BANK(x) (((x) & SDMA_PKT_COPY_T2T_DW_12_dst_num_bank_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_num_bank_shift) + +/*define for dst_mat_aspt field*/ +#define SDMA_PKT_COPY_T2T_DW_12_dst_mat_aspt_offset 12 +#define SDMA_PKT_COPY_T2T_DW_12_dst_mat_aspt_mask 0x00000003 +#define SDMA_PKT_COPY_T2T_DW_12_dst_mat_aspt_shift 24 +#define SDMA_PKT_COPY_T2T_DW_12_DST_MAT_ASPT(x) (((x) & SDMA_PKT_COPY_T2T_DW_12_dst_mat_aspt_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_mat_aspt_shift) + +/*define for dst_pipe_config field*/ +#define SDMA_PKT_COPY_T2T_DW_12_dst_pipe_config_offset 12 +#define SDMA_PKT_COPY_T2T_DW_12_dst_pipe_config_mask 0x0000001F +#define SDMA_PKT_COPY_T2T_DW_12_dst_pipe_config_shift 26 +#define SDMA_PKT_COPY_T2T_DW_12_DST_PIPE_CONFIG(x) (((x) & SDMA_PKT_COPY_T2T_DW_12_dst_pipe_config_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_pipe_config_shift) + +/*define for DW_13 word*/ +/*define for rect_x field*/ +#define SDMA_PKT_COPY_T2T_DW_13_rect_x_offset 13 +#define SDMA_PKT_COPY_T2T_DW_13_rect_x_mask 0x00003FFF +#define SDMA_PKT_COPY_T2T_DW_13_rect_x_shift 0 +#define SDMA_PKT_COPY_T2T_DW_13_RECT_X(x) (((x) & SDMA_PKT_COPY_T2T_DW_13_rect_x_mask) << SDMA_PKT_COPY_T2T_DW_13_rect_x_shift) + +/*define for rect_y field*/ +#define SDMA_PKT_COPY_T2T_DW_13_rect_y_offset 13 +#define SDMA_PKT_COPY_T2T_DW_13_rect_y_mask 0x00003FFF +#define SDMA_PKT_COPY_T2T_DW_13_rect_y_shift 16 +#define SDMA_PKT_COPY_T2T_DW_13_RECT_Y(x) (((x) & SDMA_PKT_COPY_T2T_DW_13_rect_y_mask) << SDMA_PKT_COPY_T2T_DW_13_rect_y_shift) + +/*define for DW_14 word*/ +/*define for rect_z field*/ +#define SDMA_PKT_COPY_T2T_DW_14_rect_z_offset 14 +#define SDMA_PKT_COPY_T2T_DW_14_rect_z_mask 0x000007FF +#define SDMA_PKT_COPY_T2T_DW_14_rect_z_shift 0 +#define SDMA_PKT_COPY_T2T_DW_14_RECT_Z(x) (((x) & SDMA_PKT_COPY_T2T_DW_14_rect_z_mask) << SDMA_PKT_COPY_T2T_DW_14_rect_z_shift) + +/*define for dst_sw field*/ +#define SDMA_PKT_COPY_T2T_DW_14_dst_sw_offset 14 +#define SDMA_PKT_COPY_T2T_DW_14_dst_sw_mask 0x00000003 +#define SDMA_PKT_COPY_T2T_DW_14_dst_sw_shift 16 +#define SDMA_PKT_COPY_T2T_DW_14_DST_SW(x) (((x) & SDMA_PKT_COPY_T2T_DW_14_dst_sw_mask) << SDMA_PKT_COPY_T2T_DW_14_dst_sw_shift) + +/*define for src_sw field*/ +#define SDMA_PKT_COPY_T2T_DW_14_src_sw_offset 14 +#define SDMA_PKT_COPY_T2T_DW_14_src_sw_mask 0x00000003 +#define SDMA_PKT_COPY_T2T_DW_14_src_sw_shift 24 +#define SDMA_PKT_COPY_T2T_DW_14_SRC_SW(x) (((x) & SDMA_PKT_COPY_T2T_DW_14_src_sw_mask) << SDMA_PKT_COPY_T2T_DW_14_src_sw_shift) + + +/* +** Definitions for SDMA_PKT_COPY_TILED_SUBWIN packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_op_offset 0 +#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_op_mask 0x000000FF +#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_op_shift 0 +#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_OP(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_HEADER_op_mask) << 
SDMA_PKT_COPY_TILED_SUBWIN_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_sub_op_offset 0 +#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_sub_op_shift 8 +#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_HEADER_sub_op_mask) << SDMA_PKT_COPY_TILED_SUBWIN_HEADER_sub_op_shift) + +/*define for detile field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_detile_offset 0 +#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_detile_mask 0x00000001 +#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_detile_shift 31 +#define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_DETILE(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_HEADER_detile_mask) << SDMA_PKT_COPY_TILED_SUBWIN_HEADER_detile_shift) + +/*define for TILED_ADDR_LO word*/ +/*define for tiled_addr_31_0 field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_LO_tiled_addr_31_0_offset 1 +#define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_LO_tiled_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_LO_tiled_addr_31_0_shift 0 +#define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_LO_TILED_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_LO_tiled_addr_31_0_mask) << SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_LO_tiled_addr_31_0_shift) + +/*define for TILED_ADDR_HI word*/ +/*define for tiled_addr_63_32 field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_HI_tiled_addr_63_32_offset 2 +#define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_HI_tiled_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_HI_tiled_addr_63_32_shift 0 +#define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_HI_TILED_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_HI_tiled_addr_63_32_mask) << SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_HI_tiled_addr_63_32_shift) + +/*define for DW_3 word*/ +/*define for tiled_x field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_x_offset 3 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_x_mask 0x00003FFF +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_x_shift 0 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_TILED_X(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_x_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_x_shift) + +/*define for tiled_y field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_y_offset 3 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_y_mask 0x00003FFF +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_y_shift 16 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_TILED_Y(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_y_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_y_shift) + +/*define for DW_4 word*/ +/*define for tiled_z field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_tiled_z_offset 4 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_tiled_z_mask 0x000007FF +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_tiled_z_shift 0 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_TILED_Z(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_4_tiled_z_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_4_tiled_z_shift) + +/*define for pitch_in_tile field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_pitch_in_tile_offset 4 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_pitch_in_tile_mask 0x00000FFF +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_pitch_in_tile_shift 16 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_PITCH_IN_TILE(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_4_pitch_in_tile_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_4_pitch_in_tile_shift) + +/*define for DW_5 word*/ +/*define for slice_pitch field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_5_slice_pitch_offset 5 +#define 
SDMA_PKT_COPY_TILED_SUBWIN_DW_5_slice_pitch_mask 0x003FFFFF +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_5_slice_pitch_shift 0 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_5_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_5_slice_pitch_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_5_slice_pitch_shift) + +/*define for DW_6 word*/ +/*define for element_size field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_element_size_offset 6 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_element_size_mask 0x00000007 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_element_size_shift 0 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_ELEMENT_SIZE(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_element_size_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_element_size_shift) + +/*define for array_mode field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_array_mode_offset 6 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_array_mode_mask 0x0000000F +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_array_mode_shift 3 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_ARRAY_MODE(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_array_mode_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_array_mode_shift) + +/*define for mit_mode field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mit_mode_offset 6 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mit_mode_mask 0x00000007 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mit_mode_shift 8 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_MIT_MODE(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mit_mode_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mit_mode_shift) + +/*define for tilesplit_size field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_tilesplit_size_offset 6 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_tilesplit_size_mask 0x00000007 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_tilesplit_size_shift 11 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_TILESPLIT_SIZE(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_tilesplit_size_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_tilesplit_size_shift) + +/*define for bank_w field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_w_offset 6 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_w_mask 0x00000003 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_w_shift 15 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_BANK_W(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_w_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_w_shift) + +/*define for bank_h field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_h_offset 6 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_h_mask 0x00000003 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_h_shift 18 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_BANK_H(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_h_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_h_shift) + +/*define for num_bank field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_num_bank_offset 6 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_num_bank_mask 0x00000003 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_num_bank_shift 21 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_NUM_BANK(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_num_bank_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_num_bank_shift) + +/*define for mat_aspt field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mat_aspt_offset 6 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mat_aspt_mask 0x00000003 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mat_aspt_shift 24 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_MAT_ASPT(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mat_aspt_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mat_aspt_shift) + +/*define for pipe_config field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_pipe_config_offset 6 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_pipe_config_mask 0x0000001F +#define 
SDMA_PKT_COPY_TILED_SUBWIN_DW_6_pipe_config_shift 26 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_PIPE_CONFIG(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_pipe_config_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_pipe_config_shift) + +/*define for LINEAR_ADDR_LO word*/ +/*define for linear_addr_31_0 field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_LO_linear_addr_31_0_offset 7 +#define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_LO_linear_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_LO_linear_addr_31_0_shift 0 +#define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_LO_LINEAR_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_LO_linear_addr_31_0_mask) << SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_LO_linear_addr_31_0_shift) + +/*define for LINEAR_ADDR_HI word*/ +/*define for linear_addr_63_32 field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_HI_linear_addr_63_32_offset 8 +#define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_HI_linear_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_HI_linear_addr_63_32_shift 0 +#define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_HI_LINEAR_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_HI_linear_addr_63_32_mask) << SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_HI_linear_addr_63_32_shift) + +/*define for DW_9 word*/ +/*define for linear_x field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_x_offset 9 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_x_mask 0x00003FFF +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_x_shift 0 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_LINEAR_X(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_x_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_x_shift) + +/*define for linear_y field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_y_offset 9 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_y_mask 0x00003FFF +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_y_shift 16 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_LINEAR_Y(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_y_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_y_shift) + +/*define for DW_10 word*/ +/*define for linear_z field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_z_offset 10 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_z_mask 0x000007FF +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_z_shift 0 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_LINEAR_Z(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_z_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_z_shift) + +/*define for linear_pitch field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_pitch_offset 10 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_pitch_mask 0x00003FFF +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_pitch_shift 16 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_LINEAR_PITCH(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_pitch_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_pitch_shift) + +/*define for DW_11 word*/ +/*define for linear_slice_pitch field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_11_linear_slice_pitch_offset 11 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_11_linear_slice_pitch_mask 0x0FFFFFFF +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_11_linear_slice_pitch_shift 0 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_11_LINEAR_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_11_linear_slice_pitch_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_11_linear_slice_pitch_shift) + +/*define for DW_12 word*/ +/*define for rect_x field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_x_offset 12 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_x_mask 0x00003FFF +#define 
SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_x_shift 0 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_12_RECT_X(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_x_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_x_shift) + +/*define for rect_y field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_y_offset 12 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_y_mask 0x00003FFF +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_y_shift 16 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_12_RECT_Y(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_y_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_y_shift) + +/*define for DW_13 word*/ +/*define for rect_z field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_rect_z_offset 13 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_rect_z_mask 0x000007FF +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_rect_z_shift 0 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_RECT_Z(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_13_rect_z_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_13_rect_z_shift) + +/*define for linear_sw field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_linear_sw_offset 13 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_linear_sw_mask 0x00000003 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_linear_sw_shift 16 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_LINEAR_SW(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_13_linear_sw_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_13_linear_sw_shift) + +/*define for tile_sw field*/ +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_tile_sw_offset 13 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_tile_sw_mask 0x00000003 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_tile_sw_shift 24 +#define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_TILE_SW(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_13_tile_sw_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_13_tile_sw_shift) + + +/* +** Definitions for SDMA_PKT_COPY_STRUCT packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_COPY_STRUCT_HEADER_op_offset 0 +#define SDMA_PKT_COPY_STRUCT_HEADER_op_mask 0x000000FF +#define SDMA_PKT_COPY_STRUCT_HEADER_op_shift 0 +#define SDMA_PKT_COPY_STRUCT_HEADER_OP(x) (((x) & SDMA_PKT_COPY_STRUCT_HEADER_op_mask) << SDMA_PKT_COPY_STRUCT_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_COPY_STRUCT_HEADER_sub_op_offset 0 +#define SDMA_PKT_COPY_STRUCT_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_COPY_STRUCT_HEADER_sub_op_shift 8 +#define SDMA_PKT_COPY_STRUCT_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_STRUCT_HEADER_sub_op_mask) << SDMA_PKT_COPY_STRUCT_HEADER_sub_op_shift) + +/*define for detile field*/ +#define SDMA_PKT_COPY_STRUCT_HEADER_detile_offset 0 +#define SDMA_PKT_COPY_STRUCT_HEADER_detile_mask 0x00000001 +#define SDMA_PKT_COPY_STRUCT_HEADER_detile_shift 31 +#define SDMA_PKT_COPY_STRUCT_HEADER_DETILE(x) (((x) & SDMA_PKT_COPY_STRUCT_HEADER_detile_mask) << SDMA_PKT_COPY_STRUCT_HEADER_detile_shift) + +/*define for SB_ADDR_LO word*/ +/*define for sb_addr_31_0 field*/ +#define SDMA_PKT_COPY_STRUCT_SB_ADDR_LO_sb_addr_31_0_offset 1 +#define SDMA_PKT_COPY_STRUCT_SB_ADDR_LO_sb_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_STRUCT_SB_ADDR_LO_sb_addr_31_0_shift 0 +#define SDMA_PKT_COPY_STRUCT_SB_ADDR_LO_SB_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_STRUCT_SB_ADDR_LO_sb_addr_31_0_mask) << SDMA_PKT_COPY_STRUCT_SB_ADDR_LO_sb_addr_31_0_shift) + +/*define for SB_ADDR_HI word*/ +/*define for sb_addr_63_32 field*/ +#define SDMA_PKT_COPY_STRUCT_SB_ADDR_HI_sb_addr_63_32_offset 2 +#define SDMA_PKT_COPY_STRUCT_SB_ADDR_HI_sb_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_STRUCT_SB_ADDR_HI_sb_addr_63_32_shift 0 +#define 
SDMA_PKT_COPY_STRUCT_SB_ADDR_HI_SB_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_STRUCT_SB_ADDR_HI_sb_addr_63_32_mask) << SDMA_PKT_COPY_STRUCT_SB_ADDR_HI_sb_addr_63_32_shift) + +/*define for START_INDEX word*/ +/*define for start_index field*/ +#define SDMA_PKT_COPY_STRUCT_START_INDEX_start_index_offset 3 +#define SDMA_PKT_COPY_STRUCT_START_INDEX_start_index_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_STRUCT_START_INDEX_start_index_shift 0 +#define SDMA_PKT_COPY_STRUCT_START_INDEX_START_INDEX(x) (((x) & SDMA_PKT_COPY_STRUCT_START_INDEX_start_index_mask) << SDMA_PKT_COPY_STRUCT_START_INDEX_start_index_shift) + +/*define for COUNT word*/ +/*define for count field*/ +#define SDMA_PKT_COPY_STRUCT_COUNT_count_offset 4 +#define SDMA_PKT_COPY_STRUCT_COUNT_count_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_STRUCT_COUNT_count_shift 0 +#define SDMA_PKT_COPY_STRUCT_COUNT_COUNT(x) (((x) & SDMA_PKT_COPY_STRUCT_COUNT_count_mask) << SDMA_PKT_COPY_STRUCT_COUNT_count_shift) + +/*define for DW_5 word*/ +/*define for stride field*/ +#define SDMA_PKT_COPY_STRUCT_DW_5_stride_offset 5 +#define SDMA_PKT_COPY_STRUCT_DW_5_stride_mask 0x000007FF +#define SDMA_PKT_COPY_STRUCT_DW_5_stride_shift 0 +#define SDMA_PKT_COPY_STRUCT_DW_5_STRIDE(x) (((x) & SDMA_PKT_COPY_STRUCT_DW_5_stride_mask) << SDMA_PKT_COPY_STRUCT_DW_5_stride_shift) + +/*define for struct_sw field*/ +#define SDMA_PKT_COPY_STRUCT_DW_5_struct_sw_offset 5 +#define SDMA_PKT_COPY_STRUCT_DW_5_struct_sw_mask 0x00000003 +#define SDMA_PKT_COPY_STRUCT_DW_5_struct_sw_shift 16 +#define SDMA_PKT_COPY_STRUCT_DW_5_STRUCT_SW(x) (((x) & SDMA_PKT_COPY_STRUCT_DW_5_struct_sw_mask) << SDMA_PKT_COPY_STRUCT_DW_5_struct_sw_shift) + +/*define for struct_ha field*/ +#define SDMA_PKT_COPY_STRUCT_DW_5_struct_ha_offset 5 +#define SDMA_PKT_COPY_STRUCT_DW_5_struct_ha_mask 0x00000001 +#define SDMA_PKT_COPY_STRUCT_DW_5_struct_ha_shift 22 +#define SDMA_PKT_COPY_STRUCT_DW_5_STRUCT_HA(x) (((x) & SDMA_PKT_COPY_STRUCT_DW_5_struct_ha_mask) << SDMA_PKT_COPY_STRUCT_DW_5_struct_ha_shift) + +/*define for linear_sw field*/ +#define SDMA_PKT_COPY_STRUCT_DW_5_linear_sw_offset 5 +#define SDMA_PKT_COPY_STRUCT_DW_5_linear_sw_mask 0x00000003 +#define SDMA_PKT_COPY_STRUCT_DW_5_linear_sw_shift 24 +#define SDMA_PKT_COPY_STRUCT_DW_5_LINEAR_SW(x) (((x) & SDMA_PKT_COPY_STRUCT_DW_5_linear_sw_mask) << SDMA_PKT_COPY_STRUCT_DW_5_linear_sw_shift) + +/*define for linear_ha field*/ +#define SDMA_PKT_COPY_STRUCT_DW_5_linear_ha_offset 5 +#define SDMA_PKT_COPY_STRUCT_DW_5_linear_ha_mask 0x00000001 +#define SDMA_PKT_COPY_STRUCT_DW_5_linear_ha_shift 30 +#define SDMA_PKT_COPY_STRUCT_DW_5_LINEAR_HA(x) (((x) & SDMA_PKT_COPY_STRUCT_DW_5_linear_ha_mask) << SDMA_PKT_COPY_STRUCT_DW_5_linear_ha_shift) + +/*define for LINEAR_ADDR_LO word*/ +/*define for linear_addr_31_0 field*/ +#define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_LO_linear_addr_31_0_offset 6 +#define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_LO_linear_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_LO_linear_addr_31_0_shift 0 +#define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_LO_LINEAR_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_LO_linear_addr_31_0_mask) << SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_LO_linear_addr_31_0_shift) + +/*define for LINEAR_ADDR_HI word*/ +/*define for linear_addr_63_32 field*/ +#define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_HI_linear_addr_63_32_offset 7 +#define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_HI_linear_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_HI_linear_addr_63_32_shift 0 +#define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_HI_LINEAR_ADDR_63_32(x) (((x) & 
SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_HI_linear_addr_63_32_mask) << SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_HI_linear_addr_63_32_shift) + + +/* +** Definitions for SDMA_PKT_WRITE_UNTILED packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_WRITE_UNTILED_HEADER_op_offset 0 +#define SDMA_PKT_WRITE_UNTILED_HEADER_op_mask 0x000000FF +#define SDMA_PKT_WRITE_UNTILED_HEADER_op_shift 0 +#define SDMA_PKT_WRITE_UNTILED_HEADER_OP(x) (((x) & SDMA_PKT_WRITE_UNTILED_HEADER_op_mask) << SDMA_PKT_WRITE_UNTILED_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_WRITE_UNTILED_HEADER_sub_op_offset 0 +#define SDMA_PKT_WRITE_UNTILED_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_WRITE_UNTILED_HEADER_sub_op_shift 8 +#define SDMA_PKT_WRITE_UNTILED_HEADER_SUB_OP(x) (((x) & SDMA_PKT_WRITE_UNTILED_HEADER_sub_op_mask) << SDMA_PKT_WRITE_UNTILED_HEADER_sub_op_shift) + +/*define for DST_ADDR_LO word*/ +/*define for dst_addr_31_0 field*/ +#define SDMA_PKT_WRITE_UNTILED_DST_ADDR_LO_dst_addr_31_0_offset 1 +#define SDMA_PKT_WRITE_UNTILED_DST_ADDR_LO_dst_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_WRITE_UNTILED_DST_ADDR_LO_dst_addr_31_0_shift 0 +#define SDMA_PKT_WRITE_UNTILED_DST_ADDR_LO_DST_ADDR_31_0(x) (((x) & SDMA_PKT_WRITE_UNTILED_DST_ADDR_LO_dst_addr_31_0_mask) << SDMA_PKT_WRITE_UNTILED_DST_ADDR_LO_dst_addr_31_0_shift) + +/*define for DST_ADDR_HI word*/ +/*define for dst_addr_63_32 field*/ +#define SDMA_PKT_WRITE_UNTILED_DST_ADDR_HI_dst_addr_63_32_offset 2 +#define SDMA_PKT_WRITE_UNTILED_DST_ADDR_HI_dst_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_WRITE_UNTILED_DST_ADDR_HI_dst_addr_63_32_shift 0 +#define SDMA_PKT_WRITE_UNTILED_DST_ADDR_HI_DST_ADDR_63_32(x) (((x) & SDMA_PKT_WRITE_UNTILED_DST_ADDR_HI_dst_addr_63_32_mask) << SDMA_PKT_WRITE_UNTILED_DST_ADDR_HI_dst_addr_63_32_shift) + +/*define for DW_3 word*/ +/*define for count field*/ +#define SDMA_PKT_WRITE_UNTILED_DW_3_count_offset 3 +#define SDMA_PKT_WRITE_UNTILED_DW_3_count_mask 0x003FFFFF +#define SDMA_PKT_WRITE_UNTILED_DW_3_count_shift 0 +#define SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(x) (((x) & SDMA_PKT_WRITE_UNTILED_DW_3_count_mask) << SDMA_PKT_WRITE_UNTILED_DW_3_count_shift) + +/*define for sw field*/ +#define SDMA_PKT_WRITE_UNTILED_DW_3_sw_offset 3 +#define SDMA_PKT_WRITE_UNTILED_DW_3_sw_mask 0x00000003 +#define SDMA_PKT_WRITE_UNTILED_DW_3_sw_shift 24 +#define SDMA_PKT_WRITE_UNTILED_DW_3_SW(x) (((x) & SDMA_PKT_WRITE_UNTILED_DW_3_sw_mask) << SDMA_PKT_WRITE_UNTILED_DW_3_sw_shift) + +/*define for DATA0 word*/ +/*define for data0 field*/ +#define SDMA_PKT_WRITE_UNTILED_DATA0_data0_offset 4 +#define SDMA_PKT_WRITE_UNTILED_DATA0_data0_mask 0xFFFFFFFF +#define SDMA_PKT_WRITE_UNTILED_DATA0_data0_shift 0 +#define SDMA_PKT_WRITE_UNTILED_DATA0_DATA0(x) (((x) & SDMA_PKT_WRITE_UNTILED_DATA0_data0_mask) << SDMA_PKT_WRITE_UNTILED_DATA0_data0_shift) + + +/* +** Definitions for SDMA_PKT_WRITE_TILED packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_WRITE_TILED_HEADER_op_offset 0 +#define SDMA_PKT_WRITE_TILED_HEADER_op_mask 0x000000FF +#define SDMA_PKT_WRITE_TILED_HEADER_op_shift 0 +#define SDMA_PKT_WRITE_TILED_HEADER_OP(x) (((x) & SDMA_PKT_WRITE_TILED_HEADER_op_mask) << SDMA_PKT_WRITE_TILED_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_WRITE_TILED_HEADER_sub_op_offset 0 +#define SDMA_PKT_WRITE_TILED_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_WRITE_TILED_HEADER_sub_op_shift 8 +#define SDMA_PKT_WRITE_TILED_HEADER_SUB_OP(x) (((x) & SDMA_PKT_WRITE_TILED_HEADER_sub_op_mask) << 
SDMA_PKT_WRITE_TILED_HEADER_sub_op_shift) + +/*define for DST_ADDR_LO word*/ +/*define for dst_addr_31_0 field*/ +#define SDMA_PKT_WRITE_TILED_DST_ADDR_LO_dst_addr_31_0_offset 1 +#define SDMA_PKT_WRITE_TILED_DST_ADDR_LO_dst_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_WRITE_TILED_DST_ADDR_LO_dst_addr_31_0_shift 0 +#define SDMA_PKT_WRITE_TILED_DST_ADDR_LO_DST_ADDR_31_0(x) (((x) & SDMA_PKT_WRITE_TILED_DST_ADDR_LO_dst_addr_31_0_mask) << SDMA_PKT_WRITE_TILED_DST_ADDR_LO_dst_addr_31_0_shift) + +/*define for DST_ADDR_HI word*/ +/*define for dst_addr_63_32 field*/ +#define SDMA_PKT_WRITE_TILED_DST_ADDR_HI_dst_addr_63_32_offset 2 +#define SDMA_PKT_WRITE_TILED_DST_ADDR_HI_dst_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_WRITE_TILED_DST_ADDR_HI_dst_addr_63_32_shift 0 +#define SDMA_PKT_WRITE_TILED_DST_ADDR_HI_DST_ADDR_63_32(x) (((x) & SDMA_PKT_WRITE_TILED_DST_ADDR_HI_dst_addr_63_32_mask) << SDMA_PKT_WRITE_TILED_DST_ADDR_HI_dst_addr_63_32_shift) + +/*define for DW_3 word*/ +/*define for pitch_in_tile field*/ +#define SDMA_PKT_WRITE_TILED_DW_3_pitch_in_tile_offset 3 +#define SDMA_PKT_WRITE_TILED_DW_3_pitch_in_tile_mask 0x000007FF +#define SDMA_PKT_WRITE_TILED_DW_3_pitch_in_tile_shift 0 +#define SDMA_PKT_WRITE_TILED_DW_3_PITCH_IN_TILE(x) (((x) & SDMA_PKT_WRITE_TILED_DW_3_pitch_in_tile_mask) << SDMA_PKT_WRITE_TILED_DW_3_pitch_in_tile_shift) + +/*define for height field*/ +#define SDMA_PKT_WRITE_TILED_DW_3_height_offset 3 +#define SDMA_PKT_WRITE_TILED_DW_3_height_mask 0x00003FFF +#define SDMA_PKT_WRITE_TILED_DW_3_height_shift 16 +#define SDMA_PKT_WRITE_TILED_DW_3_HEIGHT(x) (((x) & SDMA_PKT_WRITE_TILED_DW_3_height_mask) << SDMA_PKT_WRITE_TILED_DW_3_height_shift) + +/*define for DW_4 word*/ +/*define for slice_pitch field*/ +#define SDMA_PKT_WRITE_TILED_DW_4_slice_pitch_offset 4 +#define SDMA_PKT_WRITE_TILED_DW_4_slice_pitch_mask 0x003FFFFF +#define SDMA_PKT_WRITE_TILED_DW_4_slice_pitch_shift 0 +#define SDMA_PKT_WRITE_TILED_DW_4_SLICE_PITCH(x) (((x) & SDMA_PKT_WRITE_TILED_DW_4_slice_pitch_mask) << SDMA_PKT_WRITE_TILED_DW_4_slice_pitch_shift) + +/*define for DW_5 word*/ +/*define for element_size field*/ +#define SDMA_PKT_WRITE_TILED_DW_5_element_size_offset 5 +#define SDMA_PKT_WRITE_TILED_DW_5_element_size_mask 0x00000007 +#define SDMA_PKT_WRITE_TILED_DW_5_element_size_shift 0 +#define SDMA_PKT_WRITE_TILED_DW_5_ELEMENT_SIZE(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_element_size_mask) << SDMA_PKT_WRITE_TILED_DW_5_element_size_shift) + +/*define for array_mode field*/ +#define SDMA_PKT_WRITE_TILED_DW_5_array_mode_offset 5 +#define SDMA_PKT_WRITE_TILED_DW_5_array_mode_mask 0x0000000F +#define SDMA_PKT_WRITE_TILED_DW_5_array_mode_shift 3 +#define SDMA_PKT_WRITE_TILED_DW_5_ARRAY_MODE(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_array_mode_mask) << SDMA_PKT_WRITE_TILED_DW_5_array_mode_shift) + +/*define for mit_mode field*/ +#define SDMA_PKT_WRITE_TILED_DW_5_mit_mode_offset 5 +#define SDMA_PKT_WRITE_TILED_DW_5_mit_mode_mask 0x00000007 +#define SDMA_PKT_WRITE_TILED_DW_5_mit_mode_shift 8 +#define SDMA_PKT_WRITE_TILED_DW_5_MIT_MODE(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_mit_mode_mask) << SDMA_PKT_WRITE_TILED_DW_5_mit_mode_shift) + +/*define for tilesplit_size field*/ +#define SDMA_PKT_WRITE_TILED_DW_5_tilesplit_size_offset 5 +#define SDMA_PKT_WRITE_TILED_DW_5_tilesplit_size_mask 0x00000007 +#define SDMA_PKT_WRITE_TILED_DW_5_tilesplit_size_shift 11 +#define SDMA_PKT_WRITE_TILED_DW_5_TILESPLIT_SIZE(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_tilesplit_size_mask) << SDMA_PKT_WRITE_TILED_DW_5_tilesplit_size_shift) + +/*define for bank_w 
field*/ +#define SDMA_PKT_WRITE_TILED_DW_5_bank_w_offset 5 +#define SDMA_PKT_WRITE_TILED_DW_5_bank_w_mask 0x00000003 +#define SDMA_PKT_WRITE_TILED_DW_5_bank_w_shift 15 +#define SDMA_PKT_WRITE_TILED_DW_5_BANK_W(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_bank_w_mask) << SDMA_PKT_WRITE_TILED_DW_5_bank_w_shift) + +/*define for bank_h field*/ +#define SDMA_PKT_WRITE_TILED_DW_5_bank_h_offset 5 +#define SDMA_PKT_WRITE_TILED_DW_5_bank_h_mask 0x00000003 +#define SDMA_PKT_WRITE_TILED_DW_5_bank_h_shift 18 +#define SDMA_PKT_WRITE_TILED_DW_5_BANK_H(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_bank_h_mask) << SDMA_PKT_WRITE_TILED_DW_5_bank_h_shift) + +/*define for num_bank field*/ +#define SDMA_PKT_WRITE_TILED_DW_5_num_bank_offset 5 +#define SDMA_PKT_WRITE_TILED_DW_5_num_bank_mask 0x00000003 +#define SDMA_PKT_WRITE_TILED_DW_5_num_bank_shift 21 +#define SDMA_PKT_WRITE_TILED_DW_5_NUM_BANK(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_num_bank_mask) << SDMA_PKT_WRITE_TILED_DW_5_num_bank_shift) + +/*define for mat_aspt field*/ +#define SDMA_PKT_WRITE_TILED_DW_5_mat_aspt_offset 5 +#define SDMA_PKT_WRITE_TILED_DW_5_mat_aspt_mask 0x00000003 +#define SDMA_PKT_WRITE_TILED_DW_5_mat_aspt_shift 24 +#define SDMA_PKT_WRITE_TILED_DW_5_MAT_ASPT(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_mat_aspt_mask) << SDMA_PKT_WRITE_TILED_DW_5_mat_aspt_shift) + +/*define for pipe_config field*/ +#define SDMA_PKT_WRITE_TILED_DW_5_pipe_config_offset 5 +#define SDMA_PKT_WRITE_TILED_DW_5_pipe_config_mask 0x0000001F +#define SDMA_PKT_WRITE_TILED_DW_5_pipe_config_shift 26 +#define SDMA_PKT_WRITE_TILED_DW_5_PIPE_CONFIG(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_pipe_config_mask) << SDMA_PKT_WRITE_TILED_DW_5_pipe_config_shift) + +/*define for DW_6 word*/ +/*define for x field*/ +#define SDMA_PKT_WRITE_TILED_DW_6_x_offset 6 +#define SDMA_PKT_WRITE_TILED_DW_6_x_mask 0x00003FFF +#define SDMA_PKT_WRITE_TILED_DW_6_x_shift 0 +#define SDMA_PKT_WRITE_TILED_DW_6_X(x) (((x) & SDMA_PKT_WRITE_TILED_DW_6_x_mask) << SDMA_PKT_WRITE_TILED_DW_6_x_shift) + +/*define for y field*/ +#define SDMA_PKT_WRITE_TILED_DW_6_y_offset 6 +#define SDMA_PKT_WRITE_TILED_DW_6_y_mask 0x00003FFF +#define SDMA_PKT_WRITE_TILED_DW_6_y_shift 16 +#define SDMA_PKT_WRITE_TILED_DW_6_Y(x) (((x) & SDMA_PKT_WRITE_TILED_DW_6_y_mask) << SDMA_PKT_WRITE_TILED_DW_6_y_shift) + +/*define for DW_7 word*/ +/*define for z field*/ +#define SDMA_PKT_WRITE_TILED_DW_7_z_offset 7 +#define SDMA_PKT_WRITE_TILED_DW_7_z_mask 0x00000FFF +#define SDMA_PKT_WRITE_TILED_DW_7_z_shift 0 +#define SDMA_PKT_WRITE_TILED_DW_7_Z(x) (((x) & SDMA_PKT_WRITE_TILED_DW_7_z_mask) << SDMA_PKT_WRITE_TILED_DW_7_z_shift) + +/*define for sw field*/ +#define SDMA_PKT_WRITE_TILED_DW_7_sw_offset 7 +#define SDMA_PKT_WRITE_TILED_DW_7_sw_mask 0x00000003 +#define SDMA_PKT_WRITE_TILED_DW_7_sw_shift 24 +#define SDMA_PKT_WRITE_TILED_DW_7_SW(x) (((x) & SDMA_PKT_WRITE_TILED_DW_7_sw_mask) << SDMA_PKT_WRITE_TILED_DW_7_sw_shift) + +/*define for COUNT word*/ +/*define for count field*/ +#define SDMA_PKT_WRITE_TILED_COUNT_count_offset 8 +#define SDMA_PKT_WRITE_TILED_COUNT_count_mask 0x003FFFFF +#define SDMA_PKT_WRITE_TILED_COUNT_count_shift 0 +#define SDMA_PKT_WRITE_TILED_COUNT_COUNT(x) (((x) & SDMA_PKT_WRITE_TILED_COUNT_count_mask) << SDMA_PKT_WRITE_TILED_COUNT_count_shift) + +/*define for DATA0 word*/ +/*define for data0 field*/ +#define SDMA_PKT_WRITE_TILED_DATA0_data0_offset 9 +#define SDMA_PKT_WRITE_TILED_DATA0_data0_mask 0xFFFFFFFF +#define SDMA_PKT_WRITE_TILED_DATA0_data0_shift 0 +#define SDMA_PKT_WRITE_TILED_DATA0_DATA0(x) (((x) & SDMA_PKT_WRITE_TILED_DATA0_data0_mask) << 
SDMA_PKT_WRITE_TILED_DATA0_data0_shift) + + +/* +** Definitions for SDMA_PKT_WRITE_INCR packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_WRITE_INCR_HEADER_op_offset 0 +#define SDMA_PKT_WRITE_INCR_HEADER_op_mask 0x000000FF +#define SDMA_PKT_WRITE_INCR_HEADER_op_shift 0 +#define SDMA_PKT_WRITE_INCR_HEADER_OP(x) (((x) & SDMA_PKT_WRITE_INCR_HEADER_op_mask) << SDMA_PKT_WRITE_INCR_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_WRITE_INCR_HEADER_sub_op_offset 0 +#define SDMA_PKT_WRITE_INCR_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_WRITE_INCR_HEADER_sub_op_shift 8 +#define SDMA_PKT_WRITE_INCR_HEADER_SUB_OP(x) (((x) & SDMA_PKT_WRITE_INCR_HEADER_sub_op_mask) << SDMA_PKT_WRITE_INCR_HEADER_sub_op_shift) + +/*define for DST_ADDR_LO word*/ +/*define for dst_addr_31_0 field*/ +#define SDMA_PKT_WRITE_INCR_DST_ADDR_LO_dst_addr_31_0_offset 1 +#define SDMA_PKT_WRITE_INCR_DST_ADDR_LO_dst_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_WRITE_INCR_DST_ADDR_LO_dst_addr_31_0_shift 0 +#define SDMA_PKT_WRITE_INCR_DST_ADDR_LO_DST_ADDR_31_0(x) (((x) & SDMA_PKT_WRITE_INCR_DST_ADDR_LO_dst_addr_31_0_mask) << SDMA_PKT_WRITE_INCR_DST_ADDR_LO_dst_addr_31_0_shift) + +/*define for DST_ADDR_HI word*/ +/*define for dst_addr_63_32 field*/ +#define SDMA_PKT_WRITE_INCR_DST_ADDR_HI_dst_addr_63_32_offset 2 +#define SDMA_PKT_WRITE_INCR_DST_ADDR_HI_dst_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_WRITE_INCR_DST_ADDR_HI_dst_addr_63_32_shift 0 +#define SDMA_PKT_WRITE_INCR_DST_ADDR_HI_DST_ADDR_63_32(x) (((x) & SDMA_PKT_WRITE_INCR_DST_ADDR_HI_dst_addr_63_32_mask) << SDMA_PKT_WRITE_INCR_DST_ADDR_HI_dst_addr_63_32_shift) + +/*define for MASK_DW0 word*/ +/*define for mask_dw0 field*/ +#define SDMA_PKT_WRITE_INCR_MASK_DW0_mask_dw0_offset 3 +#define SDMA_PKT_WRITE_INCR_MASK_DW0_mask_dw0_mask 0xFFFFFFFF +#define SDMA_PKT_WRITE_INCR_MASK_DW0_mask_dw0_shift 0 +#define SDMA_PKT_WRITE_INCR_MASK_DW0_MASK_DW0(x) (((x) & SDMA_PKT_WRITE_INCR_MASK_DW0_mask_dw0_mask) << SDMA_PKT_WRITE_INCR_MASK_DW0_mask_dw0_shift) + +/*define for MASK_DW1 word*/ +/*define for mask_dw1 field*/ +#define SDMA_PKT_WRITE_INCR_MASK_DW1_mask_dw1_offset 4 +#define SDMA_PKT_WRITE_INCR_MASK_DW1_mask_dw1_mask 0xFFFFFFFF +#define SDMA_PKT_WRITE_INCR_MASK_DW1_mask_dw1_shift 0 +#define SDMA_PKT_WRITE_INCR_MASK_DW1_MASK_DW1(x) (((x) & SDMA_PKT_WRITE_INCR_MASK_DW1_mask_dw1_mask) << SDMA_PKT_WRITE_INCR_MASK_DW1_mask_dw1_shift) + +/*define for INIT_DW0 word*/ +/*define for init_dw0 field*/ +#define SDMA_PKT_WRITE_INCR_INIT_DW0_init_dw0_offset 5 +#define SDMA_PKT_WRITE_INCR_INIT_DW0_init_dw0_mask 0xFFFFFFFF +#define SDMA_PKT_WRITE_INCR_INIT_DW0_init_dw0_shift 0 +#define SDMA_PKT_WRITE_INCR_INIT_DW0_INIT_DW0(x) (((x) & SDMA_PKT_WRITE_INCR_INIT_DW0_init_dw0_mask) << SDMA_PKT_WRITE_INCR_INIT_DW0_init_dw0_shift) + +/*define for INIT_DW1 word*/ +/*define for init_dw1 field*/ +#define SDMA_PKT_WRITE_INCR_INIT_DW1_init_dw1_offset 6 +#define SDMA_PKT_WRITE_INCR_INIT_DW1_init_dw1_mask 0xFFFFFFFF +#define SDMA_PKT_WRITE_INCR_INIT_DW1_init_dw1_shift 0 +#define SDMA_PKT_WRITE_INCR_INIT_DW1_INIT_DW1(x) (((x) & SDMA_PKT_WRITE_INCR_INIT_DW1_init_dw1_mask) << SDMA_PKT_WRITE_INCR_INIT_DW1_init_dw1_shift) + +/*define for INCR_DW0 word*/ +/*define for incr_dw0 field*/ +#define SDMA_PKT_WRITE_INCR_INCR_DW0_incr_dw0_offset 7 +#define SDMA_PKT_WRITE_INCR_INCR_DW0_incr_dw0_mask 0xFFFFFFFF +#define SDMA_PKT_WRITE_INCR_INCR_DW0_incr_dw0_shift 0 +#define SDMA_PKT_WRITE_INCR_INCR_DW0_INCR_DW0(x) (((x) & SDMA_PKT_WRITE_INCR_INCR_DW0_incr_dw0_mask) << 
SDMA_PKT_WRITE_INCR_INCR_DW0_incr_dw0_shift) + +/*define for INCR_DW1 word*/ +/*define for incr_dw1 field*/ +#define SDMA_PKT_WRITE_INCR_INCR_DW1_incr_dw1_offset 8 +#define SDMA_PKT_WRITE_INCR_INCR_DW1_incr_dw1_mask 0xFFFFFFFF +#define SDMA_PKT_WRITE_INCR_INCR_DW1_incr_dw1_shift 0 +#define SDMA_PKT_WRITE_INCR_INCR_DW1_INCR_DW1(x) (((x) & SDMA_PKT_WRITE_INCR_INCR_DW1_incr_dw1_mask) << SDMA_PKT_WRITE_INCR_INCR_DW1_incr_dw1_shift) + +/*define for COUNT word*/ +/*define for count field*/ +#define SDMA_PKT_WRITE_INCR_COUNT_count_offset 9 +#define SDMA_PKT_WRITE_INCR_COUNT_count_mask 0x0007FFFF +#define SDMA_PKT_WRITE_INCR_COUNT_count_shift 0 +#define SDMA_PKT_WRITE_INCR_COUNT_COUNT(x) (((x) & SDMA_PKT_WRITE_INCR_COUNT_count_mask) << SDMA_PKT_WRITE_INCR_COUNT_count_shift) + + +/* +** Definitions for SDMA_PKT_INDIRECT packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_INDIRECT_HEADER_op_offset 0 +#define SDMA_PKT_INDIRECT_HEADER_op_mask 0x000000FF +#define SDMA_PKT_INDIRECT_HEADER_op_shift 0 +#define SDMA_PKT_INDIRECT_HEADER_OP(x) (((x) & SDMA_PKT_INDIRECT_HEADER_op_mask) << SDMA_PKT_INDIRECT_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_INDIRECT_HEADER_sub_op_offset 0 +#define SDMA_PKT_INDIRECT_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_INDIRECT_HEADER_sub_op_shift 8 +#define SDMA_PKT_INDIRECT_HEADER_SUB_OP(x) (((x) & SDMA_PKT_INDIRECT_HEADER_sub_op_mask) << SDMA_PKT_INDIRECT_HEADER_sub_op_shift) + +/*define for vmid field*/ +#define SDMA_PKT_INDIRECT_HEADER_vmid_offset 0 +#define SDMA_PKT_INDIRECT_HEADER_vmid_mask 0x0000000F +#define SDMA_PKT_INDIRECT_HEADER_vmid_shift 16 +#define SDMA_PKT_INDIRECT_HEADER_VMID(x) (((x) & SDMA_PKT_INDIRECT_HEADER_vmid_mask) << SDMA_PKT_INDIRECT_HEADER_vmid_shift) + +/*define for BASE_LO word*/ +/*define for ib_base_31_0 field*/ +#define SDMA_PKT_INDIRECT_BASE_LO_ib_base_31_0_offset 1 +#define SDMA_PKT_INDIRECT_BASE_LO_ib_base_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_INDIRECT_BASE_LO_ib_base_31_0_shift 0 +#define SDMA_PKT_INDIRECT_BASE_LO_IB_BASE_31_0(x) (((x) & SDMA_PKT_INDIRECT_BASE_LO_ib_base_31_0_mask) << SDMA_PKT_INDIRECT_BASE_LO_ib_base_31_0_shift) + +/*define for BASE_HI word*/ +/*define for ib_base_63_32 field*/ +#define SDMA_PKT_INDIRECT_BASE_HI_ib_base_63_32_offset 2 +#define SDMA_PKT_INDIRECT_BASE_HI_ib_base_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_INDIRECT_BASE_HI_ib_base_63_32_shift 0 +#define SDMA_PKT_INDIRECT_BASE_HI_IB_BASE_63_32(x) (((x) & SDMA_PKT_INDIRECT_BASE_HI_ib_base_63_32_mask) << SDMA_PKT_INDIRECT_BASE_HI_ib_base_63_32_shift) + +/*define for IB_SIZE word*/ +/*define for ib_size field*/ +#define SDMA_PKT_INDIRECT_IB_SIZE_ib_size_offset 3 +#define SDMA_PKT_INDIRECT_IB_SIZE_ib_size_mask 0x000FFFFF +#define SDMA_PKT_INDIRECT_IB_SIZE_ib_size_shift 0 +#define SDMA_PKT_INDIRECT_IB_SIZE_IB_SIZE(x) (((x) & SDMA_PKT_INDIRECT_IB_SIZE_ib_size_mask) << SDMA_PKT_INDIRECT_IB_SIZE_ib_size_shift) + +/*define for CSA_ADDR_LO word*/ +/*define for csa_addr_31_0 field*/ +#define SDMA_PKT_INDIRECT_CSA_ADDR_LO_csa_addr_31_0_offset 4 +#define SDMA_PKT_INDIRECT_CSA_ADDR_LO_csa_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_INDIRECT_CSA_ADDR_LO_csa_addr_31_0_shift 0 +#define SDMA_PKT_INDIRECT_CSA_ADDR_LO_CSA_ADDR_31_0(x) (((x) & SDMA_PKT_INDIRECT_CSA_ADDR_LO_csa_addr_31_0_mask) << SDMA_PKT_INDIRECT_CSA_ADDR_LO_csa_addr_31_0_shift) + +/*define for CSA_ADDR_HI word*/ +/*define for csa_addr_63_32 field*/ +#define SDMA_PKT_INDIRECT_CSA_ADDR_HI_csa_addr_63_32_offset 5 +#define 
SDMA_PKT_INDIRECT_CSA_ADDR_HI_csa_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_INDIRECT_CSA_ADDR_HI_csa_addr_63_32_shift 0 +#define SDMA_PKT_INDIRECT_CSA_ADDR_HI_CSA_ADDR_63_32(x) (((x) & SDMA_PKT_INDIRECT_CSA_ADDR_HI_csa_addr_63_32_mask) << SDMA_PKT_INDIRECT_CSA_ADDR_HI_csa_addr_63_32_shift) + + +/* +** Definitions for SDMA_PKT_SEMAPHORE packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_SEMAPHORE_HEADER_op_offset 0 +#define SDMA_PKT_SEMAPHORE_HEADER_op_mask 0x000000FF +#define SDMA_PKT_SEMAPHORE_HEADER_op_shift 0 +#define SDMA_PKT_SEMAPHORE_HEADER_OP(x) (((x) & SDMA_PKT_SEMAPHORE_HEADER_op_mask) << SDMA_PKT_SEMAPHORE_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_SEMAPHORE_HEADER_sub_op_offset 0 +#define SDMA_PKT_SEMAPHORE_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_SEMAPHORE_HEADER_sub_op_shift 8 +#define SDMA_PKT_SEMAPHORE_HEADER_SUB_OP(x) (((x) & SDMA_PKT_SEMAPHORE_HEADER_sub_op_mask) << SDMA_PKT_SEMAPHORE_HEADER_sub_op_shift) + +/*define for write_one field*/ +#define SDMA_PKT_SEMAPHORE_HEADER_write_one_offset 0 +#define SDMA_PKT_SEMAPHORE_HEADER_write_one_mask 0x00000001 +#define SDMA_PKT_SEMAPHORE_HEADER_write_one_shift 29 +#define SDMA_PKT_SEMAPHORE_HEADER_WRITE_ONE(x) (((x) & SDMA_PKT_SEMAPHORE_HEADER_write_one_mask) << SDMA_PKT_SEMAPHORE_HEADER_write_one_shift) + +/*define for signal field*/ +#define SDMA_PKT_SEMAPHORE_HEADER_signal_offset 0 +#define SDMA_PKT_SEMAPHORE_HEADER_signal_mask 0x00000001 +#define SDMA_PKT_SEMAPHORE_HEADER_signal_shift 30 +#define SDMA_PKT_SEMAPHORE_HEADER_SIGNAL(x) (((x) & SDMA_PKT_SEMAPHORE_HEADER_signal_mask) << SDMA_PKT_SEMAPHORE_HEADER_signal_shift) + +/*define for mailbox field*/ +#define SDMA_PKT_SEMAPHORE_HEADER_mailbox_offset 0 +#define SDMA_PKT_SEMAPHORE_HEADER_mailbox_mask 0x00000001 +#define SDMA_PKT_SEMAPHORE_HEADER_mailbox_shift 31 +#define SDMA_PKT_SEMAPHORE_HEADER_MAILBOX(x) (((x) & SDMA_PKT_SEMAPHORE_HEADER_mailbox_mask) << SDMA_PKT_SEMAPHORE_HEADER_mailbox_shift) + +/*define for ADDR_LO word*/ +/*define for addr_31_0 field*/ +#define SDMA_PKT_SEMAPHORE_ADDR_LO_addr_31_0_offset 1 +#define SDMA_PKT_SEMAPHORE_ADDR_LO_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_SEMAPHORE_ADDR_LO_addr_31_0_shift 0 +#define SDMA_PKT_SEMAPHORE_ADDR_LO_ADDR_31_0(x) (((x) & SDMA_PKT_SEMAPHORE_ADDR_LO_addr_31_0_mask) << SDMA_PKT_SEMAPHORE_ADDR_LO_addr_31_0_shift) + +/*define for ADDR_HI word*/ +/*define for addr_63_32 field*/ +#define SDMA_PKT_SEMAPHORE_ADDR_HI_addr_63_32_offset 2 +#define SDMA_PKT_SEMAPHORE_ADDR_HI_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_SEMAPHORE_ADDR_HI_addr_63_32_shift 0 +#define SDMA_PKT_SEMAPHORE_ADDR_HI_ADDR_63_32(x) (((x) & SDMA_PKT_SEMAPHORE_ADDR_HI_addr_63_32_mask) << SDMA_PKT_SEMAPHORE_ADDR_HI_addr_63_32_shift) + + +/* +** Definitions for SDMA_PKT_FENCE packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_FENCE_HEADER_op_offset 0 +#define SDMA_PKT_FENCE_HEADER_op_mask 0x000000FF +#define SDMA_PKT_FENCE_HEADER_op_shift 0 +#define SDMA_PKT_FENCE_HEADER_OP(x) (((x) & SDMA_PKT_FENCE_HEADER_op_mask) << SDMA_PKT_FENCE_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_FENCE_HEADER_sub_op_offset 0 +#define SDMA_PKT_FENCE_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_FENCE_HEADER_sub_op_shift 8 +#define SDMA_PKT_FENCE_HEADER_SUB_OP(x) (((x) & SDMA_PKT_FENCE_HEADER_sub_op_mask) << SDMA_PKT_FENCE_HEADER_sub_op_shift) + +/*define for ADDR_LO word*/ +/*define for addr_31_0 field*/ +#define SDMA_PKT_FENCE_ADDR_LO_addr_31_0_offset 1 
+#define SDMA_PKT_FENCE_ADDR_LO_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_FENCE_ADDR_LO_addr_31_0_shift 0 +#define SDMA_PKT_FENCE_ADDR_LO_ADDR_31_0(x) (((x) & SDMA_PKT_FENCE_ADDR_LO_addr_31_0_mask) << SDMA_PKT_FENCE_ADDR_LO_addr_31_0_shift) + +/*define for ADDR_HI word*/ +/*define for addr_63_32 field*/ +#define SDMA_PKT_FENCE_ADDR_HI_addr_63_32_offset 2 +#define SDMA_PKT_FENCE_ADDR_HI_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_FENCE_ADDR_HI_addr_63_32_shift 0 +#define SDMA_PKT_FENCE_ADDR_HI_ADDR_63_32(x) (((x) & SDMA_PKT_FENCE_ADDR_HI_addr_63_32_mask) << SDMA_PKT_FENCE_ADDR_HI_addr_63_32_shift) + +/*define for DATA word*/ +/*define for data field*/ +#define SDMA_PKT_FENCE_DATA_data_offset 3 +#define SDMA_PKT_FENCE_DATA_data_mask 0xFFFFFFFF +#define SDMA_PKT_FENCE_DATA_data_shift 0 +#define SDMA_PKT_FENCE_DATA_DATA(x) (((x) & SDMA_PKT_FENCE_DATA_data_mask) << SDMA_PKT_FENCE_DATA_data_shift) + + +/* +** Definitions for SDMA_PKT_SRBM_WRITE packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_SRBM_WRITE_HEADER_op_offset 0 +#define SDMA_PKT_SRBM_WRITE_HEADER_op_mask 0x000000FF +#define SDMA_PKT_SRBM_WRITE_HEADER_op_shift 0 +#define SDMA_PKT_SRBM_WRITE_HEADER_OP(x) (((x) & SDMA_PKT_SRBM_WRITE_HEADER_op_mask) << SDMA_PKT_SRBM_WRITE_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_SRBM_WRITE_HEADER_sub_op_offset 0 +#define SDMA_PKT_SRBM_WRITE_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_SRBM_WRITE_HEADER_sub_op_shift 8 +#define SDMA_PKT_SRBM_WRITE_HEADER_SUB_OP(x) (((x) & SDMA_PKT_SRBM_WRITE_HEADER_sub_op_mask) << SDMA_PKT_SRBM_WRITE_HEADER_sub_op_shift) + +/*define for byte_en field*/ +#define SDMA_PKT_SRBM_WRITE_HEADER_byte_en_offset 0 +#define SDMA_PKT_SRBM_WRITE_HEADER_byte_en_mask 0x0000000F +#define SDMA_PKT_SRBM_WRITE_HEADER_byte_en_shift 28 +#define SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(x) (((x) & SDMA_PKT_SRBM_WRITE_HEADER_byte_en_mask) << SDMA_PKT_SRBM_WRITE_HEADER_byte_en_shift) + +/*define for ADDR word*/ +/*define for addr field*/ +#define SDMA_PKT_SRBM_WRITE_ADDR_addr_offset 1 +#define SDMA_PKT_SRBM_WRITE_ADDR_addr_mask 0x0000FFFF +#define SDMA_PKT_SRBM_WRITE_ADDR_addr_shift 0 +#define SDMA_PKT_SRBM_WRITE_ADDR_ADDR(x) (((x) & SDMA_PKT_SRBM_WRITE_ADDR_addr_mask) << SDMA_PKT_SRBM_WRITE_ADDR_addr_shift) + +/*define for DATA word*/ +/*define for data field*/ +#define SDMA_PKT_SRBM_WRITE_DATA_data_offset 2 +#define SDMA_PKT_SRBM_WRITE_DATA_data_mask 0xFFFFFFFF +#define SDMA_PKT_SRBM_WRITE_DATA_data_shift 0 +#define SDMA_PKT_SRBM_WRITE_DATA_DATA(x) (((x) & SDMA_PKT_SRBM_WRITE_DATA_data_mask) << SDMA_PKT_SRBM_WRITE_DATA_data_shift) + + +/* +** Definitions for SDMA_PKT_PRE_EXE packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_PRE_EXE_HEADER_op_offset 0 +#define SDMA_PKT_PRE_EXE_HEADER_op_mask 0x000000FF +#define SDMA_PKT_PRE_EXE_HEADER_op_shift 0 +#define SDMA_PKT_PRE_EXE_HEADER_OP(x) (((x) & SDMA_PKT_PRE_EXE_HEADER_op_mask) << SDMA_PKT_PRE_EXE_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_PRE_EXE_HEADER_sub_op_offset 0 +#define SDMA_PKT_PRE_EXE_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_PRE_EXE_HEADER_sub_op_shift 8 +#define SDMA_PKT_PRE_EXE_HEADER_SUB_OP(x) (((x) & SDMA_PKT_PRE_EXE_HEADER_sub_op_mask) << SDMA_PKT_PRE_EXE_HEADER_sub_op_shift) + +/*define for dev_sel field*/ +#define SDMA_PKT_PRE_EXE_HEADER_dev_sel_offset 0 +#define SDMA_PKT_PRE_EXE_HEADER_dev_sel_mask 0x000000FF +#define SDMA_PKT_PRE_EXE_HEADER_dev_sel_shift 16 +#define SDMA_PKT_PRE_EXE_HEADER_DEV_SEL(x) (((x) & 
SDMA_PKT_PRE_EXE_HEADER_dev_sel_mask) << SDMA_PKT_PRE_EXE_HEADER_dev_sel_shift) + +/*define for EXEC_COUNT word*/ +/*define for exec_count field*/ +#define SDMA_PKT_PRE_EXE_EXEC_COUNT_exec_count_offset 1 +#define SDMA_PKT_PRE_EXE_EXEC_COUNT_exec_count_mask 0x00003FFF +#define SDMA_PKT_PRE_EXE_EXEC_COUNT_exec_count_shift 0 +#define SDMA_PKT_PRE_EXE_EXEC_COUNT_EXEC_COUNT(x) (((x) & SDMA_PKT_PRE_EXE_EXEC_COUNT_exec_count_mask) << SDMA_PKT_PRE_EXE_EXEC_COUNT_exec_count_shift) + + +/* +** Definitions for SDMA_PKT_COND_EXE packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_COND_EXE_HEADER_op_offset 0 +#define SDMA_PKT_COND_EXE_HEADER_op_mask 0x000000FF +#define SDMA_PKT_COND_EXE_HEADER_op_shift 0 +#define SDMA_PKT_COND_EXE_HEADER_OP(x) (((x) & SDMA_PKT_COND_EXE_HEADER_op_mask) << SDMA_PKT_COND_EXE_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_COND_EXE_HEADER_sub_op_offset 0 +#define SDMA_PKT_COND_EXE_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_COND_EXE_HEADER_sub_op_shift 8 +#define SDMA_PKT_COND_EXE_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COND_EXE_HEADER_sub_op_mask) << SDMA_PKT_COND_EXE_HEADER_sub_op_shift) + +/*define for ADDR_LO word*/ +/*define for addr_31_0 field*/ +#define SDMA_PKT_COND_EXE_ADDR_LO_addr_31_0_offset 1 +#define SDMA_PKT_COND_EXE_ADDR_LO_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_COND_EXE_ADDR_LO_addr_31_0_shift 0 +#define SDMA_PKT_COND_EXE_ADDR_LO_ADDR_31_0(x) (((x) & SDMA_PKT_COND_EXE_ADDR_LO_addr_31_0_mask) << SDMA_PKT_COND_EXE_ADDR_LO_addr_31_0_shift) + +/*define for ADDR_HI word*/ +/*define for addr_63_32 field*/ +#define SDMA_PKT_COND_EXE_ADDR_HI_addr_63_32_offset 2 +#define SDMA_PKT_COND_EXE_ADDR_HI_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_COND_EXE_ADDR_HI_addr_63_32_shift 0 +#define SDMA_PKT_COND_EXE_ADDR_HI_ADDR_63_32(x) (((x) & SDMA_PKT_COND_EXE_ADDR_HI_addr_63_32_mask) << SDMA_PKT_COND_EXE_ADDR_HI_addr_63_32_shift) + +/*define for REFERENCE word*/ +/*define for reference field*/ +#define SDMA_PKT_COND_EXE_REFERENCE_reference_offset 3 +#define SDMA_PKT_COND_EXE_REFERENCE_reference_mask 0xFFFFFFFF +#define SDMA_PKT_COND_EXE_REFERENCE_reference_shift 0 +#define SDMA_PKT_COND_EXE_REFERENCE_REFERENCE(x) (((x) & SDMA_PKT_COND_EXE_REFERENCE_reference_mask) << SDMA_PKT_COND_EXE_REFERENCE_reference_shift) + +/*define for EXEC_COUNT word*/ +/*define for exec_count field*/ +#define SDMA_PKT_COND_EXE_EXEC_COUNT_exec_count_offset 4 +#define SDMA_PKT_COND_EXE_EXEC_COUNT_exec_count_mask 0x00003FFF +#define SDMA_PKT_COND_EXE_EXEC_COUNT_exec_count_shift 0 +#define SDMA_PKT_COND_EXE_EXEC_COUNT_EXEC_COUNT(x) (((x) & SDMA_PKT_COND_EXE_EXEC_COUNT_exec_count_mask) << SDMA_PKT_COND_EXE_EXEC_COUNT_exec_count_shift) + + +/* +** Definitions for SDMA_PKT_CONSTANT_FILL packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_CONSTANT_FILL_HEADER_op_offset 0 +#define SDMA_PKT_CONSTANT_FILL_HEADER_op_mask 0x000000FF +#define SDMA_PKT_CONSTANT_FILL_HEADER_op_shift 0 +#define SDMA_PKT_CONSTANT_FILL_HEADER_OP(x) (((x) & SDMA_PKT_CONSTANT_FILL_HEADER_op_mask) << SDMA_PKT_CONSTANT_FILL_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_CONSTANT_FILL_HEADER_sub_op_offset 0 +#define SDMA_PKT_CONSTANT_FILL_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_CONSTANT_FILL_HEADER_sub_op_shift 8 +#define SDMA_PKT_CONSTANT_FILL_HEADER_SUB_OP(x) (((x) & SDMA_PKT_CONSTANT_FILL_HEADER_sub_op_mask) << SDMA_PKT_CONSTANT_FILL_HEADER_sub_op_shift) + +/*define for sw field*/ +#define 
SDMA_PKT_CONSTANT_FILL_HEADER_sw_offset 0 +#define SDMA_PKT_CONSTANT_FILL_HEADER_sw_mask 0x00000003 +#define SDMA_PKT_CONSTANT_FILL_HEADER_sw_shift 16 +#define SDMA_PKT_CONSTANT_FILL_HEADER_SW(x) (((x) & SDMA_PKT_CONSTANT_FILL_HEADER_sw_mask) << SDMA_PKT_CONSTANT_FILL_HEADER_sw_shift) + +/*define for fillsize field*/ +#define SDMA_PKT_CONSTANT_FILL_HEADER_fillsize_offset 0 +#define SDMA_PKT_CONSTANT_FILL_HEADER_fillsize_mask 0x00000003 +#define SDMA_PKT_CONSTANT_FILL_HEADER_fillsize_shift 30 +#define SDMA_PKT_CONSTANT_FILL_HEADER_FILLSIZE(x) (((x) & SDMA_PKT_CONSTANT_FILL_HEADER_fillsize_mask) << SDMA_PKT_CONSTANT_FILL_HEADER_fillsize_shift) + +/*define for DST_ADDR_LO word*/ +/*define for dst_addr_31_0 field*/ +#define SDMA_PKT_CONSTANT_FILL_DST_ADDR_LO_dst_addr_31_0_offset 1 +#define SDMA_PKT_CONSTANT_FILL_DST_ADDR_LO_dst_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_CONSTANT_FILL_DST_ADDR_LO_dst_addr_31_0_shift 0 +#define SDMA_PKT_CONSTANT_FILL_DST_ADDR_LO_DST_ADDR_31_0(x) (((x) & SDMA_PKT_CONSTANT_FILL_DST_ADDR_LO_dst_addr_31_0_mask) << SDMA_PKT_CONSTANT_FILL_DST_ADDR_LO_dst_addr_31_0_shift) + +/*define for DST_ADDR_HI word*/ +/*define for dst_addr_63_32 field*/ +#define SDMA_PKT_CONSTANT_FILL_DST_ADDR_HI_dst_addr_63_32_offset 2 +#define SDMA_PKT_CONSTANT_FILL_DST_ADDR_HI_dst_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_CONSTANT_FILL_DST_ADDR_HI_dst_addr_63_32_shift 0 +#define SDMA_PKT_CONSTANT_FILL_DST_ADDR_HI_DST_ADDR_63_32(x) (((x) & SDMA_PKT_CONSTANT_FILL_DST_ADDR_HI_dst_addr_63_32_mask) << SDMA_PKT_CONSTANT_FILL_DST_ADDR_HI_dst_addr_63_32_shift) + +/*define for DATA word*/ +/*define for src_data_31_0 field*/ +#define SDMA_PKT_CONSTANT_FILL_DATA_src_data_31_0_offset 3 +#define SDMA_PKT_CONSTANT_FILL_DATA_src_data_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_CONSTANT_FILL_DATA_src_data_31_0_shift 0 +#define SDMA_PKT_CONSTANT_FILL_DATA_SRC_DATA_31_0(x) (((x) & SDMA_PKT_CONSTANT_FILL_DATA_src_data_31_0_mask) << SDMA_PKT_CONSTANT_FILL_DATA_src_data_31_0_shift) + +/*define for COUNT word*/ +/*define for count field*/ +#define SDMA_PKT_CONSTANT_FILL_COUNT_count_offset 4 +#define SDMA_PKT_CONSTANT_FILL_COUNT_count_mask 0x003FFFFF +#define SDMA_PKT_CONSTANT_FILL_COUNT_count_shift 0 +#define SDMA_PKT_CONSTANT_FILL_COUNT_COUNT(x) (((x) & SDMA_PKT_CONSTANT_FILL_COUNT_count_mask) << SDMA_PKT_CONSTANT_FILL_COUNT_count_shift) + + +/* +** Definitions for SDMA_PKT_POLL_REGMEM packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_POLL_REGMEM_HEADER_op_offset 0 +#define SDMA_PKT_POLL_REGMEM_HEADER_op_mask 0x000000FF +#define SDMA_PKT_POLL_REGMEM_HEADER_op_shift 0 +#define SDMA_PKT_POLL_REGMEM_HEADER_OP(x) (((x) & SDMA_PKT_POLL_REGMEM_HEADER_op_mask) << SDMA_PKT_POLL_REGMEM_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_POLL_REGMEM_HEADER_sub_op_offset 0 +#define SDMA_PKT_POLL_REGMEM_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_POLL_REGMEM_HEADER_sub_op_shift 8 +#define SDMA_PKT_POLL_REGMEM_HEADER_SUB_OP(x) (((x) & SDMA_PKT_POLL_REGMEM_HEADER_sub_op_mask) << SDMA_PKT_POLL_REGMEM_HEADER_sub_op_shift) + +/*define for hdp_flush field*/ +#define SDMA_PKT_POLL_REGMEM_HEADER_hdp_flush_offset 0 +#define SDMA_PKT_POLL_REGMEM_HEADER_hdp_flush_mask 0x00000001 +#define SDMA_PKT_POLL_REGMEM_HEADER_hdp_flush_shift 26 +#define SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(x) (((x) & SDMA_PKT_POLL_REGMEM_HEADER_hdp_flush_mask) << SDMA_PKT_POLL_REGMEM_HEADER_hdp_flush_shift) + +/*define for func field*/ +#define SDMA_PKT_POLL_REGMEM_HEADER_func_offset 0 +#define 
SDMA_PKT_POLL_REGMEM_HEADER_func_mask 0x00000007 +#define SDMA_PKT_POLL_REGMEM_HEADER_func_shift 28 +#define SDMA_PKT_POLL_REGMEM_HEADER_FUNC(x) (((x) & SDMA_PKT_POLL_REGMEM_HEADER_func_mask) << SDMA_PKT_POLL_REGMEM_HEADER_func_shift) + +/*define for mem_poll field*/ +#define SDMA_PKT_POLL_REGMEM_HEADER_mem_poll_offset 0 +#define SDMA_PKT_POLL_REGMEM_HEADER_mem_poll_mask 0x00000001 +#define SDMA_PKT_POLL_REGMEM_HEADER_mem_poll_shift 31 +#define SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(x) (((x) & SDMA_PKT_POLL_REGMEM_HEADER_mem_poll_mask) << SDMA_PKT_POLL_REGMEM_HEADER_mem_poll_shift) + +/*define for ADDR_LO word*/ +/*define for addr_31_0 field*/ +#define SDMA_PKT_POLL_REGMEM_ADDR_LO_addr_31_0_offset 1 +#define SDMA_PKT_POLL_REGMEM_ADDR_LO_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_POLL_REGMEM_ADDR_LO_addr_31_0_shift 0 +#define SDMA_PKT_POLL_REGMEM_ADDR_LO_ADDR_31_0(x) (((x) & SDMA_PKT_POLL_REGMEM_ADDR_LO_addr_31_0_mask) << SDMA_PKT_POLL_REGMEM_ADDR_LO_addr_31_0_shift) + +/*define for ADDR_HI word*/ +/*define for addr_63_32 field*/ +#define SDMA_PKT_POLL_REGMEM_ADDR_HI_addr_63_32_offset 2 +#define SDMA_PKT_POLL_REGMEM_ADDR_HI_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_POLL_REGMEM_ADDR_HI_addr_63_32_shift 0 +#define SDMA_PKT_POLL_REGMEM_ADDR_HI_ADDR_63_32(x) (((x) & SDMA_PKT_POLL_REGMEM_ADDR_HI_addr_63_32_mask) << SDMA_PKT_POLL_REGMEM_ADDR_HI_addr_63_32_shift) + +/*define for VALUE word*/ +/*define for value field*/ +#define SDMA_PKT_POLL_REGMEM_VALUE_value_offset 3 +#define SDMA_PKT_POLL_REGMEM_VALUE_value_mask 0xFFFFFFFF +#define SDMA_PKT_POLL_REGMEM_VALUE_value_shift 0 +#define SDMA_PKT_POLL_REGMEM_VALUE_VALUE(x) (((x) & SDMA_PKT_POLL_REGMEM_VALUE_value_mask) << SDMA_PKT_POLL_REGMEM_VALUE_value_shift) + +/*define for MASK word*/ +/*define for mask field*/ +#define SDMA_PKT_POLL_REGMEM_MASK_mask_offset 4 +#define SDMA_PKT_POLL_REGMEM_MASK_mask_mask 0xFFFFFFFF +#define SDMA_PKT_POLL_REGMEM_MASK_mask_shift 0 +#define SDMA_PKT_POLL_REGMEM_MASK_MASK(x) (((x) & SDMA_PKT_POLL_REGMEM_MASK_mask_mask) << SDMA_PKT_POLL_REGMEM_MASK_mask_shift) + +/*define for DW5 word*/ +/*define for interval field*/ +#define SDMA_PKT_POLL_REGMEM_DW5_interval_offset 5 +#define SDMA_PKT_POLL_REGMEM_DW5_interval_mask 0x0000FFFF +#define SDMA_PKT_POLL_REGMEM_DW5_interval_shift 0 +#define SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(x) (((x) & SDMA_PKT_POLL_REGMEM_DW5_interval_mask) << SDMA_PKT_POLL_REGMEM_DW5_interval_shift) + +/*define for retry_count field*/ +#define SDMA_PKT_POLL_REGMEM_DW5_retry_count_offset 5 +#define SDMA_PKT_POLL_REGMEM_DW5_retry_count_mask 0x00000FFF +#define SDMA_PKT_POLL_REGMEM_DW5_retry_count_shift 16 +#define SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(x) (((x) & SDMA_PKT_POLL_REGMEM_DW5_retry_count_mask) << SDMA_PKT_POLL_REGMEM_DW5_retry_count_shift) + + +/* +** Definitions for SDMA_PKT_ATOMIC packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_ATOMIC_HEADER_op_offset 0 +#define SDMA_PKT_ATOMIC_HEADER_op_mask 0x000000FF +#define SDMA_PKT_ATOMIC_HEADER_op_shift 0 +#define SDMA_PKT_ATOMIC_HEADER_OP(x) (((x) & SDMA_PKT_ATOMIC_HEADER_op_mask) << SDMA_PKT_ATOMIC_HEADER_op_shift) + +/*define for loop field*/ +#define SDMA_PKT_ATOMIC_HEADER_loop_offset 0 +#define SDMA_PKT_ATOMIC_HEADER_loop_mask 0x00000001 +#define SDMA_PKT_ATOMIC_HEADER_loop_shift 16 +#define SDMA_PKT_ATOMIC_HEADER_LOOP(x) (((x) & SDMA_PKT_ATOMIC_HEADER_loop_mask) << SDMA_PKT_ATOMIC_HEADER_loop_shift) + +/*define for atomic_op field*/ +#define SDMA_PKT_ATOMIC_HEADER_atomic_op_offset 0 +#define 
SDMA_PKT_ATOMIC_HEADER_atomic_op_mask 0x0000007F +#define SDMA_PKT_ATOMIC_HEADER_atomic_op_shift 25 +#define SDMA_PKT_ATOMIC_HEADER_ATOMIC_OP(x) (((x) & SDMA_PKT_ATOMIC_HEADER_atomic_op_mask) << SDMA_PKT_ATOMIC_HEADER_atomic_op_shift) + +/*define for ADDR_LO word*/ +/*define for addr_31_0 field*/ +#define SDMA_PKT_ATOMIC_ADDR_LO_addr_31_0_offset 1 +#define SDMA_PKT_ATOMIC_ADDR_LO_addr_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_ATOMIC_ADDR_LO_addr_31_0_shift 0 +#define SDMA_PKT_ATOMIC_ADDR_LO_ADDR_31_0(x) (((x) & SDMA_PKT_ATOMIC_ADDR_LO_addr_31_0_mask) << SDMA_PKT_ATOMIC_ADDR_LO_addr_31_0_shift) + +/*define for ADDR_HI word*/ +/*define for addr_63_32 field*/ +#define SDMA_PKT_ATOMIC_ADDR_HI_addr_63_32_offset 2 +#define SDMA_PKT_ATOMIC_ADDR_HI_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_ATOMIC_ADDR_HI_addr_63_32_shift 0 +#define SDMA_PKT_ATOMIC_ADDR_HI_ADDR_63_32(x) (((x) & SDMA_PKT_ATOMIC_ADDR_HI_addr_63_32_mask) << SDMA_PKT_ATOMIC_ADDR_HI_addr_63_32_shift) + +/*define for SRC_DATA_LO word*/ +/*define for src_data_31_0 field*/ +#define SDMA_PKT_ATOMIC_SRC_DATA_LO_src_data_31_0_offset 3 +#define SDMA_PKT_ATOMIC_SRC_DATA_LO_src_data_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_ATOMIC_SRC_DATA_LO_src_data_31_0_shift 0 +#define SDMA_PKT_ATOMIC_SRC_DATA_LO_SRC_DATA_31_0(x) (((x) & SDMA_PKT_ATOMIC_SRC_DATA_LO_src_data_31_0_mask) << SDMA_PKT_ATOMIC_SRC_DATA_LO_src_data_31_0_shift) + +/*define for SRC_DATA_HI word*/ +/*define for src_data_63_32 field*/ +#define SDMA_PKT_ATOMIC_SRC_DATA_HI_src_data_63_32_offset 4 +#define SDMA_PKT_ATOMIC_SRC_DATA_HI_src_data_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_ATOMIC_SRC_DATA_HI_src_data_63_32_shift 0 +#define SDMA_PKT_ATOMIC_SRC_DATA_HI_SRC_DATA_63_32(x) (((x) & SDMA_PKT_ATOMIC_SRC_DATA_HI_src_data_63_32_mask) << SDMA_PKT_ATOMIC_SRC_DATA_HI_src_data_63_32_shift) + +/*define for CMP_DATA_LO word*/ +/*define for cmp_data_31_0 field*/ +#define SDMA_PKT_ATOMIC_CMP_DATA_LO_cmp_data_31_0_offset 5 +#define SDMA_PKT_ATOMIC_CMP_DATA_LO_cmp_data_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_ATOMIC_CMP_DATA_LO_cmp_data_31_0_shift 0 +#define SDMA_PKT_ATOMIC_CMP_DATA_LO_CMP_DATA_31_0(x) (((x) & SDMA_PKT_ATOMIC_CMP_DATA_LO_cmp_data_31_0_mask) << SDMA_PKT_ATOMIC_CMP_DATA_LO_cmp_data_31_0_shift) + +/*define for CMP_DATA_HI word*/ +/*define for cmp_data_63_32 field*/ +#define SDMA_PKT_ATOMIC_CMP_DATA_HI_cmp_data_63_32_offset 6 +#define SDMA_PKT_ATOMIC_CMP_DATA_HI_cmp_data_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_ATOMIC_CMP_DATA_HI_cmp_data_63_32_shift 0 +#define SDMA_PKT_ATOMIC_CMP_DATA_HI_CMP_DATA_63_32(x) (((x) & SDMA_PKT_ATOMIC_CMP_DATA_HI_cmp_data_63_32_mask) << SDMA_PKT_ATOMIC_CMP_DATA_HI_cmp_data_63_32_shift) + +/*define for LOOP_INTERVAL word*/ +/*define for loop_interval field*/ +#define SDMA_PKT_ATOMIC_LOOP_INTERVAL_loop_interval_offset 7 +#define SDMA_PKT_ATOMIC_LOOP_INTERVAL_loop_interval_mask 0x00001FFF +#define SDMA_PKT_ATOMIC_LOOP_INTERVAL_loop_interval_shift 0 +#define SDMA_PKT_ATOMIC_LOOP_INTERVAL_LOOP_INTERVAL(x) (((x) & SDMA_PKT_ATOMIC_LOOP_INTERVAL_loop_interval_mask) << SDMA_PKT_ATOMIC_LOOP_INTERVAL_loop_interval_shift) + + +/* +** Definitions for SDMA_PKT_TIMESTAMP_SET packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_TIMESTAMP_SET_HEADER_op_offset 0 +#define SDMA_PKT_TIMESTAMP_SET_HEADER_op_mask 0x000000FF +#define SDMA_PKT_TIMESTAMP_SET_HEADER_op_shift 0 +#define SDMA_PKT_TIMESTAMP_SET_HEADER_OP(x) (((x) & SDMA_PKT_TIMESTAMP_SET_HEADER_op_mask) << SDMA_PKT_TIMESTAMP_SET_HEADER_op_shift) + +/*define for sub_op field*/ +#define 
SDMA_PKT_TIMESTAMP_SET_HEADER_sub_op_offset 0 +#define SDMA_PKT_TIMESTAMP_SET_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_TIMESTAMP_SET_HEADER_sub_op_shift 8 +#define SDMA_PKT_TIMESTAMP_SET_HEADER_SUB_OP(x) (((x) & SDMA_PKT_TIMESTAMP_SET_HEADER_sub_op_mask) << SDMA_PKT_TIMESTAMP_SET_HEADER_sub_op_shift) + +/*define for INIT_DATA_LO word*/ +/*define for init_data_31_0 field*/ +#define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_LO_init_data_31_0_offset 1 +#define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_LO_init_data_31_0_mask 0xFFFFFFFF +#define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_LO_init_data_31_0_shift 0 +#define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_LO_INIT_DATA_31_0(x) (((x) & SDMA_PKT_TIMESTAMP_SET_INIT_DATA_LO_init_data_31_0_mask) << SDMA_PKT_TIMESTAMP_SET_INIT_DATA_LO_init_data_31_0_shift) + +/*define for INIT_DATA_HI word*/ +/*define for init_data_63_32 field*/ +#define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_HI_init_data_63_32_offset 2 +#define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_HI_init_data_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_HI_init_data_63_32_shift 0 +#define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_HI_INIT_DATA_63_32(x) (((x) & SDMA_PKT_TIMESTAMP_SET_INIT_DATA_HI_init_data_63_32_mask) << SDMA_PKT_TIMESTAMP_SET_INIT_DATA_HI_init_data_63_32_shift) + + +/* +** Definitions for SDMA_PKT_TIMESTAMP_GET packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_TIMESTAMP_GET_HEADER_op_offset 0 +#define SDMA_PKT_TIMESTAMP_GET_HEADER_op_mask 0x000000FF +#define SDMA_PKT_TIMESTAMP_GET_HEADER_op_shift 0 +#define SDMA_PKT_TIMESTAMP_GET_HEADER_OP(x) (((x) & SDMA_PKT_TIMESTAMP_GET_HEADER_op_mask) << SDMA_PKT_TIMESTAMP_GET_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_TIMESTAMP_GET_HEADER_sub_op_offset 0 +#define SDMA_PKT_TIMESTAMP_GET_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_TIMESTAMP_GET_HEADER_sub_op_shift 8 +#define SDMA_PKT_TIMESTAMP_GET_HEADER_SUB_OP(x) (((x) & SDMA_PKT_TIMESTAMP_GET_HEADER_sub_op_mask) << SDMA_PKT_TIMESTAMP_GET_HEADER_sub_op_shift) + +/*define for WRITE_ADDR_LO word*/ +/*define for write_addr_31_3 field*/ +#define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_LO_write_addr_31_3_offset 1 +#define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_LO_write_addr_31_3_mask 0x1FFFFFFF +#define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_LO_write_addr_31_3_shift 3 +#define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_LO_WRITE_ADDR_31_3(x) (((x) & SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_LO_write_addr_31_3_mask) << SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_LO_write_addr_31_3_shift) + +/*define for WRITE_ADDR_HI word*/ +/*define for write_addr_63_32 field*/ +#define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_HI_write_addr_63_32_offset 2 +#define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_HI_write_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_HI_write_addr_63_32_shift 0 +#define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_HI_WRITE_ADDR_63_32(x) (((x) & SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_HI_write_addr_63_32_mask) << SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_HI_write_addr_63_32_shift) + + +/* +** Definitions for SDMA_PKT_TIMESTAMP_GET_GLOBAL packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_op_offset 0 +#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_op_mask 0x000000FF +#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_op_shift 0 +#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_OP(x) (((x) & SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_op_mask) << SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_op_shift) + +/*define for sub_op field*/ +#define 
SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_sub_op_offset 0 +#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_sub_op_shift 8 +#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_SUB_OP(x) (((x) & SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_sub_op_mask) << SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_sub_op_shift) + +/*define for WRITE_ADDR_LO word*/ +/*define for write_addr_31_3 field*/ +#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_LO_write_addr_31_3_offset 1 +#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_LO_write_addr_31_3_mask 0x1FFFFFFF +#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_LO_write_addr_31_3_shift 3 +#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_LO_WRITE_ADDR_31_3(x) (((x) & SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_LO_write_addr_31_3_mask) << SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_LO_write_addr_31_3_shift) + +/*define for WRITE_ADDR_HI word*/ +/*define for write_addr_63_32 field*/ +#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_HI_write_addr_63_32_offset 2 +#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_HI_write_addr_63_32_mask 0xFFFFFFFF +#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_HI_write_addr_63_32_shift 0 +#define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_HI_WRITE_ADDR_63_32(x) (((x) & SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_HI_write_addr_63_32_mask) << SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_HI_write_addr_63_32_shift) + + +/* +** Definitions for SDMA_PKT_TRAP packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_TRAP_HEADER_op_offset 0 +#define SDMA_PKT_TRAP_HEADER_op_mask 0x000000FF +#define SDMA_PKT_TRAP_HEADER_op_shift 0 +#define SDMA_PKT_TRAP_HEADER_OP(x) (((x) & SDMA_PKT_TRAP_HEADER_op_mask) << SDMA_PKT_TRAP_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_TRAP_HEADER_sub_op_offset 0 +#define SDMA_PKT_TRAP_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_TRAP_HEADER_sub_op_shift 8 +#define SDMA_PKT_TRAP_HEADER_SUB_OP(x) (((x) & SDMA_PKT_TRAP_HEADER_sub_op_mask) << SDMA_PKT_TRAP_HEADER_sub_op_shift) + +/*define for INT_CONTEXT word*/ +/*define for int_context field*/ +#define SDMA_PKT_TRAP_INT_CONTEXT_int_context_offset 1 +#define SDMA_PKT_TRAP_INT_CONTEXT_int_context_mask 0x0FFFFFFF +#define SDMA_PKT_TRAP_INT_CONTEXT_int_context_shift 0 +#define SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(x) (((x) & SDMA_PKT_TRAP_INT_CONTEXT_int_context_mask) << SDMA_PKT_TRAP_INT_CONTEXT_int_context_shift) + + +/* +** Definitions for SDMA_PKT_NOP packet +*/ + +/*define for HEADER word*/ +/*define for op field*/ +#define SDMA_PKT_NOP_HEADER_op_offset 0 +#define SDMA_PKT_NOP_HEADER_op_mask 0x000000FF +#define SDMA_PKT_NOP_HEADER_op_shift 0 +#define SDMA_PKT_NOP_HEADER_OP(x) (((x) & SDMA_PKT_NOP_HEADER_op_mask) << SDMA_PKT_NOP_HEADER_op_shift) + +/*define for sub_op field*/ +#define SDMA_PKT_NOP_HEADER_sub_op_offset 0 +#define SDMA_PKT_NOP_HEADER_sub_op_mask 0x000000FF +#define SDMA_PKT_NOP_HEADER_sub_op_shift 8 +#define SDMA_PKT_NOP_HEADER_SUB_OP(x) (((x) & SDMA_PKT_NOP_HEADER_sub_op_mask) << SDMA_PKT_NOP_HEADER_sub_op_shift) + + +#endif /* __TONGA_SDMA_PKT_OPEN_H_ */ diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c new file mode 100644 index 000000000000..5fc53a40c7ac --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c @@ -0,0 +1,852 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. 
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include "drmP.h"
+#include "amdgpu.h"
+#include "tonga_ppsmc.h"
+#include "tonga_smumgr.h"
+#include "smu_ucode_xfer_vi.h"
+#include "amdgpu_ucode.h"
+
+#include "smu/smu_7_1_2_d.h"
+#include "smu/smu_7_1_2_sh_mask.h"
+
+#define TONGA_SMC_SIZE 0x20000
+
+static int tonga_set_smc_sram_address(struct amdgpu_device *adev, uint32_t smc_address, uint32_t limit)
+{
+	uint32_t val;
+
+	if (smc_address & 3)
+		return -EINVAL;
+
+	if ((smc_address + 3) > limit)
+		return -EINVAL;
+
+	WREG32(mmSMC_IND_INDEX_0, smc_address);
+
+	val = RREG32(mmSMC_IND_ACCESS_CNTL);
+	val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
+	WREG32(mmSMC_IND_ACCESS_CNTL, val);
+
+	return 0;
+}
+
+static int tonga_copy_bytes_to_smc(struct amdgpu_device *adev, uint32_t smc_start_address, const uint8_t *src, uint32_t byte_count, uint32_t limit)
+{
+	uint32_t addr;
+	uint32_t data, orig_data;
+	int result = 0;
+	uint32_t extra_shift;
+	unsigned long flags;
+
+	if (smc_start_address & 3)
+		return -EINVAL;
+
+	if ((smc_start_address + byte_count) > limit)
+		return -EINVAL;
+
+	addr = smc_start_address;
+
+	spin_lock_irqsave(&adev->smc_idx_lock, flags);
+	while (byte_count >= 4) {
+		/* Bytes are written into the SMC address space with the MSB first */
+		data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
+
+		result = tonga_set_smc_sram_address(adev, addr, limit);
+
+		if (result)
+			goto out;
+
+		WREG32(mmSMC_IND_DATA_0, data);
+
+		src += 4;
+		byte_count -= 4;
+		addr += 4;
+	}
+
+	if (0 != byte_count) {
+		/* Now write the odd bytes left, do a read modify write cycle */
+		data = 0;
+
+		result = tonga_set_smc_sram_address(adev, addr, limit);
+		if (result)
+			goto out;
+
+		orig_data = RREG32(mmSMC_IND_DATA_0);
+		extra_shift = 8 * (4 - byte_count);
+
+		while (byte_count > 0) {
+			data = (data << 8) + *src++;
+			byte_count--;
+		}
+
+		data <<= extra_shift;
+		data |= (orig_data & ~((~0UL) << extra_shift));
+
+		result = tonga_set_smc_sram_address(adev, addr, limit);
+		if (result)
+			goto out;
+
+		WREG32(mmSMC_IND_DATA_0, data);
+	}
+
+out:
+	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+	return result;
+}
+
+static int tonga_program_jump_on_start(struct amdgpu_device *adev)
+{
+	static unsigned char data[] = {0xE0, 0x00, 0x80, 0x40};
+	tonga_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
+
+	return 0;
+}
+
+static bool tonga_is_smc_ram_running(struct amdgpu_device *adev)
+{
+	uint32_t val =
RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
+	val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable);
+
+	return ((0 == val) && (0x20100 <= RREG32_SMC(ixSMC_PC_C)));
+}
+
+static int wait_smu_response(struct amdgpu_device *adev)
+{
+	int i;
+	uint32_t val;
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		val = RREG32(mmSMC_RESP_0);
+		if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP))
+			break;
+		udelay(1);
+	}
+
+	if (i == adev->usec_timeout)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int tonga_send_msg_to_smc_offset(struct amdgpu_device *adev)
+{
+	if (wait_smu_response(adev)) {
+		DRM_ERROR("Failed to send previous message\n");
+		return -EINVAL;
+	}
+
+	WREG32(mmSMC_MSG_ARG_0, 0x20000);
+	WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_Test);
+
+	if (wait_smu_response(adev)) {
+		DRM_ERROR("Failed to send message\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int tonga_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
+{
+	if (!tonga_is_smc_ram_running(adev))
+		return -EINVAL;
+
+	if (wait_smu_response(adev)) {
+		DRM_ERROR("Failed to send previous message\n");
+		return -EINVAL;
+	}
+
+	WREG32(mmSMC_MESSAGE_0, msg);
+
+	if (wait_smu_response(adev)) {
+		DRM_ERROR("Failed to send message\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int tonga_send_msg_to_smc_without_waiting(struct amdgpu_device *adev,
+						 PPSMC_Msg msg)
+{
+	if (wait_smu_response(adev)) {
+		DRM_ERROR("Failed to send previous message\n");
+		return -EINVAL;
+	}
+
+	WREG32(mmSMC_MESSAGE_0, msg);
+
+	return 0;
+}
+
+static int tonga_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
+						PPSMC_Msg msg,
+						uint32_t parameter)
+{
+	if (!tonga_is_smc_ram_running(adev))
+		return -EINVAL;
+
+	if (wait_smu_response(adev)) {
+		DRM_ERROR("Failed to send previous message\n");
+		return -EINVAL;
+	}
+
+	WREG32(mmSMC_MSG_ARG_0, parameter);
+
+	return tonga_send_msg_to_smc(adev, msg);
+}
+
+static int tonga_send_msg_to_smc_with_parameter_without_waiting(
+					struct amdgpu_device *adev,
+					PPSMC_Msg msg, uint32_t parameter)
+{
+	if (wait_smu_response(adev)) {
+		DRM_ERROR("Failed to send previous message\n");
+		return -EINVAL;
+	}
+
+	WREG32(mmSMC_MSG_ARG_0, parameter);
+
+	return tonga_send_msg_to_smc_without_waiting(adev, msg);
+}
+
+#if 0 /* not used yet */
+static int tonga_wait_for_smc_inactive(struct amdgpu_device *adev)
+{
+	int i;
+	uint32_t val;
+
+	if (!tonga_is_smc_ram_running(adev))
+		return -EINVAL;
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
+		if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0)
+			break;
+		udelay(1);
+	}
+
+	if (i == adev->usec_timeout)
+		return -EINVAL;
+
+	return 0;
+}
+#endif
+
+static int tonga_smu_upload_firmware_image(struct amdgpu_device *adev)
+{
+	const struct smc_firmware_header_v1_0 *hdr;
+	uint32_t ucode_size;
+	uint32_t ucode_start_address;
+	const uint8_t *src;
+	uint32_t val;
+	uint32_t byte_count;
+	uint32_t *data;
+	unsigned long flags;
+
+	if (!adev->pm.fw)
+		return -EINVAL;
+
+	hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
+	amdgpu_ucode_print_smc_hdr(&hdr->header);
+
+	adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
+	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
+	ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
+	src = (const uint8_t *)
+		(adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+
+	if (ucode_size & 3) {
+		DRM_ERROR("SMC ucode is not 4 bytes aligned\n");
+		return -EINVAL;
+	}
+
+	if (ucode_size > TONGA_SMC_SIZE) {
+		DRM_ERROR("SMC
address is beyond the SMC RAM area\n"); + return -EINVAL; + } + + spin_lock_irqsave(&adev->smc_idx_lock, flags); + WREG32(mmSMC_IND_INDEX_0, ucode_start_address); + + val = RREG32(mmSMC_IND_ACCESS_CNTL); + val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1); + WREG32(mmSMC_IND_ACCESS_CNTL, val); + + byte_count = ucode_size; + data = (uint32_t *)src; + for (; byte_count >= 4; data++, byte_count -= 4) + WREG32(mmSMC_IND_DATA_0, data[0]); + + val = RREG32(mmSMC_IND_ACCESS_CNTL); + val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); + WREG32(mmSMC_IND_ACCESS_CNTL, val); + spin_unlock_irqrestore(&adev->smc_idx_lock, flags); + + return 0; +} + +#if 0 /* not used yet */ +static int tonga_read_smc_sram_dword(struct amdgpu_device *adev, + uint32_t smc_address, + uint32_t *value, + uint32_t limit) +{ + int result; + unsigned long flags; + + spin_lock_irqsave(&adev->smc_idx_lock, flags); + result = tonga_set_smc_sram_address(adev, smc_address, limit); + if (result == 0) + *value = RREG32(mmSMC_IND_DATA_0); + spin_unlock_irqrestore(&adev->smc_idx_lock, flags); + return result; +} + +static int tonga_write_smc_sram_dword(struct amdgpu_device *adev, + uint32_t smc_address, + uint32_t value, + uint32_t limit) +{ + int result; + unsigned long flags; + + spin_lock_irqsave(&adev->smc_idx_lock, flags); + result = tonga_set_smc_sram_address(adev, smc_address, limit); + if (result == 0) + WREG32(mmSMC_IND_DATA_0, value); + spin_unlock_irqrestore(&adev->smc_idx_lock, flags); + return result; +} + +static int tonga_smu_stop_smc(struct amdgpu_device *adev) +{ + uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); + val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1); + WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); + + val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); + val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1); + WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val); + + return 0; +} +#endif + +static enum AMDGPU_UCODE_ID tonga_convert_fw_type(uint32_t fw_type) +{ + switch (fw_type) { + case UCODE_ID_SDMA0: + return AMDGPU_UCODE_ID_SDMA0; + case UCODE_ID_SDMA1: + return AMDGPU_UCODE_ID_SDMA1; + case UCODE_ID_CP_CE: + return AMDGPU_UCODE_ID_CP_CE; + case UCODE_ID_CP_PFP: + return AMDGPU_UCODE_ID_CP_PFP; + case UCODE_ID_CP_ME: + return AMDGPU_UCODE_ID_CP_ME; + case UCODE_ID_CP_MEC: + case UCODE_ID_CP_MEC_JT1: + return AMDGPU_UCODE_ID_CP_MEC1; + case UCODE_ID_CP_MEC_JT2: + return AMDGPU_UCODE_ID_CP_MEC2; + case UCODE_ID_RLC_G: + return AMDGPU_UCODE_ID_RLC_G; + default: + DRM_ERROR("ucode type is out of range!\n"); + return AMDGPU_UCODE_ID_MAXIMUM; + } +} + +static int tonga_smu_populate_single_firmware_entry(struct amdgpu_device *adev, + uint32_t fw_type, + struct SMU_Entry *entry) +{ + enum AMDGPU_UCODE_ID id = tonga_convert_fw_type(fw_type); + struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id]; + const struct gfx_firmware_header_v1_0 *header = NULL; + uint64_t gpu_addr; + uint32_t data_size; + + if (ucode->fw == NULL) + return -EINVAL; + + gpu_addr = ucode->mc_addr; + header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data; + data_size = le32_to_cpu(header->header.ucode_size_bytes); + + if ((fw_type == UCODE_ID_CP_MEC_JT1) || + (fw_type == UCODE_ID_CP_MEC_JT2)) { + gpu_addr += le32_to_cpu(header->jt_offset) << 2; + data_size = le32_to_cpu(header->jt_size) << 2; + } + + entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version); + entry->id = (uint16_t)fw_type; + entry->image_addr_high = upper_32_bits(gpu_addr); + entry->image_addr_low 
= lower_32_bits(gpu_addr); + entry->meta_data_addr_high = 0; + entry->meta_data_addr_low = 0; + entry->data_size_byte = data_size; + entry->num_register_entries = 0; + + if (fw_type == UCODE_ID_RLC_G) + entry->flags = 1; + else + entry->flags = 0; + + return 0; +} + +static int tonga_smu_request_load_fw(struct amdgpu_device *adev) +{ + struct tonga_smu_private_data *private = (struct tonga_smu_private_data *)adev->smu.priv; + struct SMU_DRAMData_TOC *toc; + uint32_t fw_to_load; + + WREG32_SMC(ixSOFT_REGISTERS_TABLE_28, 0); + + tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_HI, private->smu_buffer_addr_high); + tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_LO, private->smu_buffer_addr_low); + + toc = (struct SMU_DRAMData_TOC *)private->header; + toc->num_entries = 0; + toc->structure_version = 1; + + if (!adev->firmware.smu_load) + return 0; + + if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G, + &toc->entry[toc->num_entries++])) { + DRM_ERROR("Failed to get firmware entry for RLC\n"); + return -EINVAL; + } + + if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE, + &toc->entry[toc->num_entries++])) { + DRM_ERROR("Failed to get firmware entry for CE\n"); + return -EINVAL; + } + + if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP, + &toc->entry[toc->num_entries++])) { + DRM_ERROR("Failed to get firmware entry for PFP\n"); + return -EINVAL; + } + + if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME, + &toc->entry[toc->num_entries++])) { + DRM_ERROR("Failed to get firmware entry for ME\n"); + return -EINVAL; + } + + if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC, + &toc->entry[toc->num_entries++])) { + DRM_ERROR("Failed to get firmware entry for MEC\n"); + return -EINVAL; + } + + if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1, + &toc->entry[toc->num_entries++])) { + DRM_ERROR("Failed to get firmware entry for MEC_JT1\n"); + return -EINVAL; + } + + if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2, + &toc->entry[toc->num_entries++])) { + DRM_ERROR("Failed to get firmware entry for MEC_JT2\n"); + return -EINVAL; + } + + if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0, + &toc->entry[toc->num_entries++])) { + DRM_ERROR("Failed to get firmware entry for SDMA0\n"); + return -EINVAL; + } + + if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1, + &toc->entry[toc->num_entries++])) { + DRM_ERROR("Failed to get firmware entry for SDMA1\n"); + return -EINVAL; + } + + tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high); + tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low); + + fw_to_load = UCODE_ID_RLC_G_MASK | + UCODE_ID_SDMA0_MASK | + UCODE_ID_SDMA1_MASK | + UCODE_ID_CP_CE_MASK | + UCODE_ID_CP_ME_MASK | + UCODE_ID_CP_PFP_MASK | + UCODE_ID_CP_MEC_MASK; + + if (tonga_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) { + DRM_ERROR("Fail to request SMU load ucode\n"); + return -EINVAL; + } + + return 0; +} + +static uint32_t tonga_smu_get_mask_for_fw_type(uint32_t fw_type) +{ + switch (fw_type) { + case AMDGPU_UCODE_ID_SDMA0: + return UCODE_ID_SDMA0_MASK; + case AMDGPU_UCODE_ID_SDMA1: + return UCODE_ID_SDMA1_MASK; + case AMDGPU_UCODE_ID_CP_CE: + return UCODE_ID_CP_CE_MASK; + case AMDGPU_UCODE_ID_CP_PFP: + return UCODE_ID_CP_PFP_MASK; + case AMDGPU_UCODE_ID_CP_ME: + return 
UCODE_ID_CP_ME_MASK; + case AMDGPU_UCODE_ID_CP_MEC1: + return UCODE_ID_CP_MEC_MASK; + case AMDGPU_UCODE_ID_CP_MEC2: + return UCODE_ID_CP_MEC_MASK; + case AMDGPU_UCODE_ID_RLC_G: + return UCODE_ID_RLC_G_MASK; + default: + DRM_ERROR("ucode type is out of range!\n"); + return 0; + } +} + +static int tonga_smu_check_fw_load_finish(struct amdgpu_device *adev, + uint32_t fw_type) +{ + uint32_t fw_mask = tonga_smu_get_mask_for_fw_type(fw_type); + int i; + + for (i = 0; i < adev->usec_timeout; i++) { + if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_28) & fw_mask)) + break; + udelay(1); + } + + if (i == adev->usec_timeout) { + DRM_ERROR("check firmware loading failed\n"); + return -EINVAL; + } + + return 0; +} + +static int tonga_smu_start_in_protection_mode(struct amdgpu_device *adev) +{ + int result; + uint32_t val; + int i; + + /* Assert reset */ + val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); + val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1); + WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); + + result = tonga_smu_upload_firmware_image(adev); + if (result) + return result; + + /* Clear status */ + WREG32_SMC(ixSMU_STATUS, 0); + + /* Enable clock */ + val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); + val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); + WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val); + + /* De-assert reset */ + val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); + val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0); + WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); + + /* Set SMU Auto Start */ + val = RREG32_SMC(ixSMU_INPUT_DATA); + val = REG_SET_FIELD(val, SMU_INPUT_DATA, AUTO_START, 1); + WREG32_SMC(ixSMU_INPUT_DATA, val); + + /* Clear firmware interrupt enable flag */ + WREG32_SMC(ixFIRMWARE_FLAGS, 0); + + for (i = 0; i < adev->usec_timeout; i++) { + val = RREG32_SMC(ixRCU_UC_EVENTS); + if (REG_GET_FIELD(val, RCU_UC_EVENTS, INTERRUPTS_ENABLED)) + break; + udelay(1); + } + + if (i == adev->usec_timeout) { + DRM_ERROR("Interrupt is not enabled by firmware\n"); + return -EINVAL; + } + + /* Call Test SMU message with 0x20000 offset + * to trigger SMU start + */ + tonga_send_msg_to_smc_offset(adev); + + /* Wait for done bit to be set */ + for (i = 0; i < adev->usec_timeout; i++) { + val = RREG32_SMC(ixSMU_STATUS); + if (REG_GET_FIELD(val, SMU_STATUS, SMU_DONE)) + break; + udelay(1); + } + + if (i == adev->usec_timeout) { + DRM_ERROR("Timeout for SMU start\n"); + return -EINVAL; + } + + /* Check pass/failed indicator */ + val = RREG32_SMC(ixSMU_STATUS); + if (!REG_GET_FIELD(val, SMU_STATUS, SMU_PASS)) { + DRM_ERROR("SMU Firmware start failed\n"); + return -EINVAL; + } + + /* Wait for firmware to initialize */ + for (i = 0; i < adev->usec_timeout; i++) { + val = RREG32_SMC(ixFIRMWARE_FLAGS); + if(REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED)) + break; + udelay(1); + } + + if (i == adev->usec_timeout) { + DRM_ERROR("SMU firmware initialization failed\n"); + return -EINVAL; + } + + return 0; +} + +static int tonga_smu_start_in_non_protection_mode(struct amdgpu_device *adev) +{ + int i, result; + uint32_t val; + + /* wait for smc boot up */ + for (i = 0; i < adev->usec_timeout; i++) { + val = RREG32_SMC(ixRCU_UC_EVENTS); + val = REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done); + if (val) + break; + udelay(1); + } + + if (i == adev->usec_timeout) { + DRM_ERROR("SMC boot sequence is not completed\n"); + return -EINVAL; + } + + /* Clear firmware interrupt enable flag */ + WREG32_SMC(ixFIRMWARE_FLAGS, 0); + + /* Assert reset */ + val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); + 
val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1); + WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); + + result = tonga_smu_upload_firmware_image(adev); + if (result) + return result; + + /* Set smc instruct start point at 0x0 */ + tonga_program_jump_on_start(adev); + + /* Enable clock */ + val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); + val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); + WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val); + + /* De-assert reset */ + val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); + val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0); + WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); + + /* Wait for firmware to initialize */ + for (i = 0; i < adev->usec_timeout; i++) { + val = RREG32_SMC(ixFIRMWARE_FLAGS); + if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED)) + break; + udelay(1); + } + + if (i == adev->usec_timeout) { + DRM_ERROR("Timeout for SMC firmware initialization\n"); + return -EINVAL; + } + + return 0; +} + +int tonga_smu_start(struct amdgpu_device *adev) +{ + int result; + uint32_t val; + + if (!tonga_is_smc_ram_running(adev)) { + val = RREG32_SMC(ixSMU_FIRMWARE); + if (!REG_GET_FIELD(val, SMU_FIRMWARE, SMU_MODE)) { + result = tonga_smu_start_in_non_protection_mode(adev); + if (result) + return result; + } else { + result = tonga_smu_start_in_protection_mode(adev); + if (result) + return result; + } + } + + return tonga_smu_request_load_fw(adev); +} + +static const struct amdgpu_smumgr_funcs tonga_smumgr_funcs = { + .check_fw_load_finish = tonga_smu_check_fw_load_finish, + .request_smu_load_fw = NULL, + .request_smu_specific_fw = NULL, +}; + +int tonga_smu_init(struct amdgpu_device *adev) +{ + struct tonga_smu_private_data *private; + uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096; + uint32_t smu_internal_buffer_size = 200*4096; + struct amdgpu_bo **toc_buf = &adev->smu.toc_buf; + struct amdgpu_bo **smu_buf = &adev->smu.smu_buf; + uint64_t mc_addr; + void *toc_buf_ptr; + void *smu_buf_ptr; + int ret; + + private = kzalloc(sizeof(struct tonga_smu_private_data), GFP_KERNEL); + if (NULL == private) + return -ENOMEM; + + /* allocate firmware buffers */ + if (adev->firmware.smu_load) + amdgpu_ucode_init_bo(adev); + + adev->smu.priv = private; + adev->smu.fw_flags = 0; + + /* Allocate FW image data structure and header buffer */ + ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE, + true, AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, toc_buf); + if (ret) { + DRM_ERROR("Failed to allocate memory for TOC buffer\n"); + return -ENOMEM; + } + + /* Allocate buffer for SMU internal buffer */ + ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE, + true, AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, smu_buf); + if (ret) { + DRM_ERROR("Failed to allocate memory for SMU internal buffer\n"); + return -ENOMEM; + } + + /* Retrieve GPU address for header buffer and internal buffer */ + ret = amdgpu_bo_reserve(adev->smu.toc_buf, false); + if (ret) { + amdgpu_bo_unref(&adev->smu.toc_buf); + DRM_ERROR("Failed to reserve the TOC buffer\n"); + return -EINVAL; + } + + ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr); + if (ret) { + amdgpu_bo_unreserve(adev->smu.toc_buf); + amdgpu_bo_unref(&adev->smu.toc_buf); + DRM_ERROR("Failed to pin the TOC buffer\n"); + return -EINVAL; + } + + ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr); + if (ret) { + amdgpu_bo_unreserve(adev->smu.toc_buf); + amdgpu_bo_unref(&adev->smu.toc_buf); + DRM_ERROR("Failed to map the TOC buffer\n"); + return -EINVAL; + } + + 
amdgpu_bo_unreserve(adev->smu.toc_buf); + private->header_addr_low = lower_32_bits(mc_addr); + private->header_addr_high = upper_32_bits(mc_addr); + private->header = toc_buf_ptr; + + ret = amdgpu_bo_reserve(adev->smu.smu_buf, false); + if (ret) { + amdgpu_bo_unref(&adev->smu.smu_buf); + amdgpu_bo_unref(&adev->smu.toc_buf); + DRM_ERROR("Failed to reserve the SMU internal buffer\n"); + return -EINVAL; + } + + ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr); + if (ret) { + amdgpu_bo_unreserve(adev->smu.smu_buf); + amdgpu_bo_unref(&adev->smu.smu_buf); + amdgpu_bo_unref(&adev->smu.toc_buf); + DRM_ERROR("Failed to pin the SMU internal buffer\n"); + return -EINVAL; + } + + ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr); + if (ret) { + amdgpu_bo_unreserve(adev->smu.smu_buf); + amdgpu_bo_unref(&adev->smu.smu_buf); + amdgpu_bo_unref(&adev->smu.toc_buf); + DRM_ERROR("Failed to map the SMU internal buffer\n"); + return -EINVAL; + } + + amdgpu_bo_unreserve(adev->smu.smu_buf); + private->smu_buffer_addr_low = lower_32_bits(mc_addr); + private->smu_buffer_addr_high = upper_32_bits(mc_addr); + + adev->smu.smumgr_funcs = &tonga_smumgr_funcs; + + return 0; +} + +int tonga_smu_fini(struct amdgpu_device *adev) +{ + amdgpu_bo_unref(&adev->smu.toc_buf); + amdgpu_bo_unref(&adev->smu.smu_buf); + kfree(adev->smu.priv); + adev->smu.priv = NULL; + if (adev->firmware.fw_buf) + amdgpu_ucode_fini_bo(adev); + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smumgr.h b/drivers/gpu/drm/amd/amdgpu/tonga_smumgr.h new file mode 100644 index 000000000000..c031ff99fe3e --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/tonga_smumgr.h @@ -0,0 +1,42 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef TONGA_SMUMGR_H +#define TONGA_SMUMGR_H + +#include "tonga_ppsmc.h" + +int tonga_smu_init(struct amdgpu_device *adev); +int tonga_smu_fini(struct amdgpu_device *adev); +int tonga_smu_start(struct amdgpu_device *adev); + +struct tonga_smu_private_data +{ + uint8_t *header; + uint32_t smu_buffer_addr_high; + uint32_t smu_buffer_addr_low; + uint32_t header_addr_high; + uint32_t header_addr_low; +}; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c new file mode 100644 index 000000000000..f3b3026d5932 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -0,0 +1,830 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. 
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include "amdgpu_uvd.h"
+#include "vid.h"
+#include "uvd/uvd_5_0_d.h"
+#include "uvd/uvd_5_0_sh_mask.h"
+#include "oss/oss_2_0_d.h"
+#include "oss/oss_2_0_sh_mask.h"
+
+static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
+static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
+static int uvd_v5_0_start(struct amdgpu_device *adev);
+static void uvd_v5_0_stop(struct amdgpu_device *adev);
+
+/**
+ * uvd_v5_0_ring_get_rptr - get read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware read pointer
+ */
+static uint32_t uvd_v5_0_ring_get_rptr(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	return RREG32(mmUVD_RBC_RB_RPTR);
+}
+
+/**
+ * uvd_v5_0_ring_get_wptr - get write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware write pointer
+ */
+static uint32_t uvd_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	return RREG32(mmUVD_RBC_RB_WPTR);
+}
+
+/**
+ * uvd_v5_0_ring_set_wptr - set write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the write pointer to the hardware
+ */
+static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);
+}
+
+static int uvd_v5_0_early_init(struct amdgpu_device *adev)
+{
+	uvd_v5_0_set_ring_funcs(adev);
+	uvd_v5_0_set_irq_funcs(adev);
+
+	return 0;
+}
+
+static int uvd_v5_0_sw_init(struct amdgpu_device *adev)
+{
+	struct amdgpu_ring *ring;
+	int r;
+
+	/* UVD TRAP */
+	r = amdgpu_irq_add_id(adev, 124, &adev->uvd.irq);
+	if (r)
+		return r;
+
+	r = amdgpu_uvd_sw_init(adev);
+	if (r)
+		return r;
+
+	r = amdgpu_uvd_resume(adev);
+	if (r)
+		return r;
+
+	ring = &adev->uvd.ring;
+	sprintf(ring->name, "uvd");
+	r = amdgpu_ring_init(adev, ring, 4096, CP_PACKET2, 0xf,
+			     &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
+
+	return r;
+}
+
+static int uvd_v5_0_sw_fini(struct amdgpu_device *adev)
+{
+	int r;
+
+	r = amdgpu_uvd_suspend(adev);
+	if (r)
+		return r;
+
+	r = amdgpu_uvd_sw_fini(adev);
+	if (r)
+		return r;
+
+	return r;
+}
+
+/**
+ * uvd_v5_0_hw_init - start and test UVD block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Initialize the hardware, boot up the VCPU and do some testing
+ */
+static int uvd_v5_0_hw_init(struct amdgpu_device *adev)
+{
+	struct amdgpu_ring
*ring = &adev->uvd.ring; + uint32_t tmp; + int r; + + /* raise clocks while booting up the VCPU */ + amdgpu_asic_set_uvd_clocks(adev, 53300, 40000); + + r = uvd_v5_0_start(adev); + if (r) + goto done; + + ring->ready = true; + r = amdgpu_ring_test_ring(ring); + if (r) { + ring->ready = false; + goto done; + } + + r = amdgpu_ring_lock(ring, 10); + if (r) { + DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r); + goto done; + } + + tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0); + amdgpu_ring_write(ring, tmp); + amdgpu_ring_write(ring, 0xFFFFF); + + tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0); + amdgpu_ring_write(ring, tmp); + amdgpu_ring_write(ring, 0xFFFFF); + + tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0); + amdgpu_ring_write(ring, tmp); + amdgpu_ring_write(ring, 0xFFFFF); + + /* Clear timeout status bits */ + amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0)); + amdgpu_ring_write(ring, 0x8); + + amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0)); + amdgpu_ring_write(ring, 3); + + amdgpu_ring_unlock_commit(ring); + +done: + /* lower clocks again */ + amdgpu_asic_set_uvd_clocks(adev, 0, 0); + + if (!r) + DRM_INFO("UVD initialized successfully.\n"); + + return r; +} + +/** + * uvd_v5_0_hw_fini - stop the hardware block + * + * @adev: amdgpu_device pointer + * + * Stop the UVD block, mark ring as not ready any more + */ +static int uvd_v5_0_hw_fini(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring = &adev->uvd.ring; + + uvd_v5_0_stop(adev); + ring->ready = false; + + return 0; +} + +static int uvd_v5_0_suspend(struct amdgpu_device *adev) +{ + int r; + + r = uvd_v5_0_hw_fini(adev); + if (r) + return r; + + r = amdgpu_uvd_suspend(adev); + if (r) + return r; + + return r; +} + +static int uvd_v5_0_resume(struct amdgpu_device *adev) +{ + int r; + + r = amdgpu_uvd_resume(adev); + if (r) + return r; + + r = uvd_v5_0_hw_init(adev); + if (r) + return r; + + return r; +} + +/** + * uvd_v5_0_mc_resume - memory controller programming + * + * @adev: amdgpu_device pointer + * + * Let the UVD memory controller know it's offsets + */ +static void uvd_v5_0_mc_resume(struct amdgpu_device *adev) +{ + uint64_t offset; + uint32_t size; + + /* programm memory controller bits 0-27 */ + WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, + lower_32_bits(adev->uvd.gpu_addr)); + WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, + upper_32_bits(adev->uvd.gpu_addr)); + + offset = AMDGPU_UVD_FIRMWARE_OFFSET; + size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4); + WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3); + WREG32(mmUVD_VCPU_CACHE_SIZE0, size); + + offset += size; + size = AMDGPU_UVD_STACK_SIZE; + WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3); + WREG32(mmUVD_VCPU_CACHE_SIZE1, size); + + offset += size; + size = AMDGPU_UVD_HEAP_SIZE; + WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3); + WREG32(mmUVD_VCPU_CACHE_SIZE2, size); +} + +/** + * uvd_v5_0_start - start UVD block + * + * @adev: amdgpu_device pointer + * + * Setup and start the UVD block + */ +static int uvd_v5_0_start(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring = &adev->uvd.ring; + uint32_t rb_bufsz, tmp; + uint32_t lmi_swap_cntl; + uint32_t mp_swap_cntl; + int i, j, r; + + /*disable DPG */ + WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2)); + + /* disable byte swapping */ + lmi_swap_cntl = 0; + mp_swap_cntl = 0; + + uvd_v5_0_mc_resume(adev); + + /* disable clock gating */ + WREG32(mmUVD_CGC_GATE, 0); + + /* disable interupt */ + WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1)); + + /* stall UMC and register 
bus before resetting VCPU */ + WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); + mdelay(1); + + /* put LMI, VCPU, RBC etc... into reset */ + WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK | + UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK | + UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK | + UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK | + UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK); + mdelay(5); + + /* take UVD block out of reset */ + WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK); + mdelay(5); + + /* initialize UVD memory controller */ + WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) | + (1 << 21) | (1 << 9) | (1 << 20)); + +#ifdef __BIG_ENDIAN + /* swap (8 in 32) RB and IB */ + lmi_swap_cntl = 0xa; + mp_swap_cntl = 0; +#endif + WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl); + WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl); + + WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040); + WREG32(mmUVD_MPC_SET_MUXA1, 0x0); + WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040); + WREG32(mmUVD_MPC_SET_MUXB1, 0x0); + WREG32(mmUVD_MPC_SET_ALU, 0); + WREG32(mmUVD_MPC_SET_MUX, 0x88); + + /* take all subblocks out of reset, except VCPU */ + WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); + mdelay(5); + + /* enable VCPU clock */ + WREG32(mmUVD_VCPU_CNTL, 1 << 9); + + /* enable UMC */ + WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8)); + + /* boot up the VCPU */ + WREG32(mmUVD_SOFT_RESET, 0); + mdelay(10); + + for (i = 0; i < 10; ++i) { + uint32_t status; + for (j = 0; j < 100; ++j) { + status = RREG32(mmUVD_STATUS); + if (status & 2) + break; + mdelay(10); + } + r = 0; + if (status & 2) + break; + + DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n"); + WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK, + ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); + mdelay(10); + WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); + mdelay(10); + r = -1; + } + + if (r) { + DRM_ERROR("UVD not responding, giving up!!!\n"); + return r; + } + /* enable master interrupt */ + WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1)); + + /* clear the bit 4 of UVD_STATUS */ + WREG32_P(mmUVD_STATUS, 0, ~(2 << 1)); + + rb_bufsz = order_base_2(ring->ring_size); + tmp = 0; + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); + /* force RBC into idle state */ + WREG32(mmUVD_RBC_RB_CNTL, tmp); + + /* set the write pointer delay */ + WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0); + + /* set the wb address */ + WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2)); + + /* programm the RB_BASE for ring buffer */ + WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW, + lower_32_bits(ring->gpu_addr)); + WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH, + upper_32_bits(ring->gpu_addr)); + + /* Initialize the ring buffer's read and write pointers */ + WREG32(mmUVD_RBC_RB_RPTR, 0); + + ring->wptr = RREG32(mmUVD_RBC_RB_RPTR); + WREG32(mmUVD_RBC_RB_WPTR, ring->wptr); + + WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK); + + return 0; +} + +/** + * uvd_v5_0_stop - stop UVD block + * + * @adev: amdgpu_device pointer + * + * stop the UVD block + */ +static void uvd_v5_0_stop(struct amdgpu_device *adev) +{ + /* 
force RBC into idle state */
+	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);
+
+	/* Stall UMC and register bus before resetting VCPU */
+	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
+	mdelay(1);
+
+	/* put VCPU into reset */
+	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+	mdelay(5);
+
+	/* disable VCPU clock */
+	WREG32(mmUVD_VCPU_CNTL, 0x0);
+
+	/* Unstall UMC and register bus */
+	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
+}
+
+/**
+ * uvd_v5_0_ring_emit_fence - emit a fence & trap command
+ *
+ * @ring: amdgpu_ring pointer
+ * @addr: GPU address the fence sequence number is written to
+ * @seq: fence sequence number to emit
+ * @write64bit: unsupported here (triggers a WARN_ON)
+ *
+ * Write a fence and a trap command to the ring.
+ */
+static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
+				     bool write64bit)
+{
+	WARN_ON(write64bit);
+
+	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
+	amdgpu_ring_write(ring, seq);
+	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
+	amdgpu_ring_write(ring, addr & 0xffffffff);
+	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
+	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
+	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
+	amdgpu_ring_write(ring, 0);
+
+	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
+	amdgpu_ring_write(ring, 0);
+	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
+	amdgpu_ring_write(ring, 0);
+	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
+	amdgpu_ring_write(ring, 2);
+}
+
+/**
+ * uvd_v5_0_ring_emit_semaphore - emit semaphore command
+ *
+ * @ring: amdgpu_ring pointer
+ * @semaphore: semaphore to emit commands for
+ * @emit_wait: true if we should emit a wait command
+ *
+ * Emit a semaphore command (either wait or signal) to the UVD ring.
+ */
+static bool uvd_v5_0_ring_emit_semaphore(struct amdgpu_ring *ring,
+					 struct amdgpu_semaphore *semaphore,
+					 bool emit_wait)
+{
+	uint64_t addr = semaphore->gpu_addr;
+
+	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_LOW, 0));
+	amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF);
+
+	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_HIGH, 0));
+	amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF);
+
+	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CMD, 0));
+	amdgpu_ring_write(ring, 0x80 | (emit_wait ?
1 : 0)); + + return true; +} + +/** + * uvd_v5_0_ring_test_ring - register write test + * + * @ring: amdgpu_ring pointer + * + * Test if we can successfully write to the context register + */ +static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + uint32_t tmp = 0; + unsigned i; + int r; + + WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD); + r = amdgpu_ring_lock(ring, 3); + if (r) { + DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", + ring->idx, r); + return r; + } + amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0)); + amdgpu_ring_write(ring, 0xDEADBEEF); + amdgpu_ring_unlock_commit(ring); + for (i = 0; i < adev->usec_timeout; i++) { + tmp = RREG32(mmUVD_CONTEXT_ID); + if (tmp == 0xDEADBEEF) + break; + DRM_UDELAY(1); + } + + if (i < adev->usec_timeout) { + DRM_INFO("ring test on %d succeeded in %d usecs\n", + ring->idx, i); + } else { + DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", + ring->idx, tmp); + r = -EINVAL; + } + return r; +} + +/** + * uvd_v5_0_ring_emit_ib - execute indirect buffer + * + * @ring: amdgpu_ring pointer + * @ib: indirect buffer to execute + * + * Write ring commands to execute the indirect buffer + */ +static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring, + struct amdgpu_ib *ib) +{ + amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0)); + amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); + amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0)); + amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); + amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0)); + amdgpu_ring_write(ring, ib->length_dw); +} + +/** + * uvd_v5_0_ring_test_ib - test ib execution + * + * @ring: amdgpu_ring pointer + * + * Test if we can successfully execute an IB + */ +static int uvd_v5_0_ring_test_ib(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + struct amdgpu_fence *fence = NULL; + int r; + + r = amdgpu_asic_set_uvd_clocks(adev, 53300, 40000); + if (r) { + DRM_ERROR("amdgpu: failed to raise UVD clocks (%d).\n", r); + return r; + } + + r = amdgpu_uvd_get_create_msg(ring, 1, NULL); + if (r) { + DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r); + goto error; + } + + r = amdgpu_uvd_get_destroy_msg(ring, 1, &fence); + if (r) { + DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r); + goto error; + } + + r = amdgpu_fence_wait(fence, false); + if (r) { + DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); + goto error; + } + DRM_INFO("ib test on ring %d succeeded\n", ring->idx); +error: + amdgpu_fence_unref(&fence); + amdgpu_asic_set_uvd_clocks(adev, 0, 0); + return r; +} + +static bool uvd_v5_0_is_idle(struct amdgpu_device *adev) +{ + return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK); +} + +static int uvd_v5_0_wait_for_idle(struct amdgpu_device *adev) +{ + unsigned i; + + for (i = 0; i < adev->usec_timeout; i++) { + if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK)) + return 0; + } + return -ETIMEDOUT; +} + +static int uvd_v5_0_soft_reset(struct amdgpu_device *adev) +{ + uvd_v5_0_stop(adev); + + WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK, + ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK); + mdelay(5); + + return uvd_v5_0_start(adev); +} + +static void uvd_v5_0_print_status(struct amdgpu_device *adev) +{ + dev_info(adev->dev, "UVD 5.0 registers\n"); + dev_info(adev->dev, " UVD_SEMA_ADDR_LOW=0x%08X\n", + RREG32(mmUVD_SEMA_ADDR_LOW)); + dev_info(adev->dev, " UVD_SEMA_ADDR_HIGH=0x%08X\n", + RREG32(mmUVD_SEMA_ADDR_HIGH)); + 
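+	/* semaphore and GPCOM VCPU "mailbox" command registers written by
+	 * uvd_v5_0_ring_emit_semaphore() and uvd_v5_0_ring_emit_fence()
+	 */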
dev_info(adev->dev, " UVD_SEMA_CMD=0x%08X\n", + RREG32(mmUVD_SEMA_CMD)); + dev_info(adev->dev, " UVD_GPCOM_VCPU_CMD=0x%08X\n", + RREG32(mmUVD_GPCOM_VCPU_CMD)); + dev_info(adev->dev, " UVD_GPCOM_VCPU_DATA0=0x%08X\n", + RREG32(mmUVD_GPCOM_VCPU_DATA0)); + dev_info(adev->dev, " UVD_GPCOM_VCPU_DATA1=0x%08X\n", + RREG32(mmUVD_GPCOM_VCPU_DATA1)); + dev_info(adev->dev, " UVD_ENGINE_CNTL=0x%08X\n", + RREG32(mmUVD_ENGINE_CNTL)); + dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n", + RREG32(mmUVD_UDEC_ADDR_CONFIG)); + dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n", + RREG32(mmUVD_UDEC_DB_ADDR_CONFIG)); + dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n", + RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG)); + dev_info(adev->dev, " UVD_SEMA_CNTL=0x%08X\n", + RREG32(mmUVD_SEMA_CNTL)); + dev_info(adev->dev, " UVD_LMI_EXT40_ADDR=0x%08X\n", + RREG32(mmUVD_LMI_EXT40_ADDR)); + dev_info(adev->dev, " UVD_CTX_INDEX=0x%08X\n", + RREG32(mmUVD_CTX_INDEX)); + dev_info(adev->dev, " UVD_CTX_DATA=0x%08X\n", + RREG32(mmUVD_CTX_DATA)); + dev_info(adev->dev, " UVD_CGC_GATE=0x%08X\n", + RREG32(mmUVD_CGC_GATE)); + dev_info(adev->dev, " UVD_CGC_CTRL=0x%08X\n", + RREG32(mmUVD_CGC_CTRL)); + dev_info(adev->dev, " UVD_LMI_CTRL2=0x%08X\n", + RREG32(mmUVD_LMI_CTRL2)); + dev_info(adev->dev, " UVD_MASTINT_EN=0x%08X\n", + RREG32(mmUVD_MASTINT_EN)); + dev_info(adev->dev, " UVD_LMI_ADDR_EXT=0x%08X\n", + RREG32(mmUVD_LMI_ADDR_EXT)); + dev_info(adev->dev, " UVD_LMI_CTRL=0x%08X\n", + RREG32(mmUVD_LMI_CTRL)); + dev_info(adev->dev, " UVD_LMI_SWAP_CNTL=0x%08X\n", + RREG32(mmUVD_LMI_SWAP_CNTL)); + dev_info(adev->dev, " UVD_MP_SWAP_CNTL=0x%08X\n", + RREG32(mmUVD_MP_SWAP_CNTL)); + dev_info(adev->dev, " UVD_MPC_SET_MUXA0=0x%08X\n", + RREG32(mmUVD_MPC_SET_MUXA0)); + dev_info(adev->dev, " UVD_MPC_SET_MUXA1=0x%08X\n", + RREG32(mmUVD_MPC_SET_MUXA1)); + dev_info(adev->dev, " UVD_MPC_SET_MUXB0=0x%08X\n", + RREG32(mmUVD_MPC_SET_MUXB0)); + dev_info(adev->dev, " UVD_MPC_SET_MUXB1=0x%08X\n", + RREG32(mmUVD_MPC_SET_MUXB1)); + dev_info(adev->dev, " UVD_MPC_SET_MUX=0x%08X\n", + RREG32(mmUVD_MPC_SET_MUX)); + dev_info(adev->dev, " UVD_MPC_SET_ALU=0x%08X\n", + RREG32(mmUVD_MPC_SET_ALU)); + dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET0=0x%08X\n", + RREG32(mmUVD_VCPU_CACHE_OFFSET0)); + dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE0=0x%08X\n", + RREG32(mmUVD_VCPU_CACHE_SIZE0)); + dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET1=0x%08X\n", + RREG32(mmUVD_VCPU_CACHE_OFFSET1)); + dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE1=0x%08X\n", + RREG32(mmUVD_VCPU_CACHE_SIZE1)); + dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET2=0x%08X\n", + RREG32(mmUVD_VCPU_CACHE_OFFSET2)); + dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE2=0x%08X\n", + RREG32(mmUVD_VCPU_CACHE_SIZE2)); + dev_info(adev->dev, " UVD_VCPU_CNTL=0x%08X\n", + RREG32(mmUVD_VCPU_CNTL)); + dev_info(adev->dev, " UVD_SOFT_RESET=0x%08X\n", + RREG32(mmUVD_SOFT_RESET)); + dev_info(adev->dev, " UVD_LMI_RBC_IB_64BIT_BAR_LOW=0x%08X\n", + RREG32(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW)); + dev_info(adev->dev, " UVD_LMI_RBC_IB_64BIT_BAR_HIGH=0x%08X\n", + RREG32(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH)); + dev_info(adev->dev, " UVD_RBC_IB_SIZE=0x%08X\n", + RREG32(mmUVD_RBC_IB_SIZE)); + dev_info(adev->dev, " UVD_LMI_RBC_RB_64BIT_BAR_LOW=0x%08X\n", + RREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW)); + dev_info(adev->dev, " UVD_LMI_RBC_RB_64BIT_BAR_HIGH=0x%08X\n", + RREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH)); + dev_info(adev->dev, " UVD_RBC_RB_RPTR=0x%08X\n", + RREG32(mmUVD_RBC_RB_RPTR)); + dev_info(adev->dev, " UVD_RBC_RB_WPTR=0x%08X\n", + RREG32(mmUVD_RBC_RB_WPTR)); + 
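+	/* ring buffer control and semaphore timeout state programmed by
+	 * uvd_v5_0_start() and uvd_v5_0_hw_init()
+	 */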
dev_info(adev->dev, " UVD_RBC_RB_WPTR_CNTL=0x%08X\n", + RREG32(mmUVD_RBC_RB_WPTR_CNTL)); + dev_info(adev->dev, " UVD_RBC_RB_CNTL=0x%08X\n", + RREG32(mmUVD_RBC_RB_CNTL)); + dev_info(adev->dev, " UVD_STATUS=0x%08X\n", + RREG32(mmUVD_STATUS)); + dev_info(adev->dev, " UVD_SEMA_TIMEOUT_STATUS=0x%08X\n", + RREG32(mmUVD_SEMA_TIMEOUT_STATUS)); + dev_info(adev->dev, " UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n", + RREG32(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL)); + dev_info(adev->dev, " UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL=0x%08X\n", + RREG32(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL)); + dev_info(adev->dev, " UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n", + RREG32(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL)); + dev_info(adev->dev, " UVD_CONTEXT_ID=0x%08X\n", + RREG32(mmUVD_CONTEXT_ID)); +} + +static int uvd_v5_0_set_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned type, + enum amdgpu_interrupt_state state) +{ + // TODO + return 0; +} + +static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + DRM_DEBUG("IH: UVD TRAP\n"); + amdgpu_fence_process(&adev->uvd.ring); + return 0; +} + +static int uvd_v5_0_set_clockgating_state(struct amdgpu_device *adev, + enum amdgpu_clockgating_state state) +{ + //TODO + + return 0; +} + +static int uvd_v5_0_set_powergating_state(struct amdgpu_device *adev, + enum amdgpu_powergating_state state) +{ + /* This doesn't actually powergate the UVD block. + * That's done in the dpm code via the SMC. This + * just re-inits the block as necessary. The actual + * gating still happens in the dpm code. We should + * revisit this when there is a cleaner line between + * the smc and the hw blocks + */ + if (state == AMDGPU_PG_STATE_GATE) { + uvd_v5_0_stop(adev); + return 0; + } else { + return uvd_v5_0_start(adev); + } +} + +const struct amdgpu_ip_funcs uvd_v5_0_ip_funcs = { + .early_init = uvd_v5_0_early_init, + .late_init = NULL, + .sw_init = uvd_v5_0_sw_init, + .sw_fini = uvd_v5_0_sw_fini, + .hw_init = uvd_v5_0_hw_init, + .hw_fini = uvd_v5_0_hw_fini, + .suspend = uvd_v5_0_suspend, + .resume = uvd_v5_0_resume, + .is_idle = uvd_v5_0_is_idle, + .wait_for_idle = uvd_v5_0_wait_for_idle, + .soft_reset = uvd_v5_0_soft_reset, + .print_status = uvd_v5_0_print_status, + .set_clockgating_state = uvd_v5_0_set_clockgating_state, + .set_powergating_state = uvd_v5_0_set_powergating_state, +}; + +static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = { + .get_rptr = uvd_v5_0_ring_get_rptr, + .get_wptr = uvd_v5_0_ring_get_wptr, + .set_wptr = uvd_v5_0_ring_set_wptr, + .parse_cs = amdgpu_uvd_ring_parse_cs, + .emit_ib = uvd_v5_0_ring_emit_ib, + .emit_fence = uvd_v5_0_ring_emit_fence, + .emit_semaphore = uvd_v5_0_ring_emit_semaphore, + .test_ring = uvd_v5_0_ring_test_ring, + .test_ib = uvd_v5_0_ring_test_ib, + .is_lockup = amdgpu_ring_test_lockup, +}; + +static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev) +{ + adev->uvd.ring.funcs = &uvd_v5_0_ring_funcs; +} + +static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = { + .set = uvd_v5_0_set_interrupt_state, + .process = uvd_v5_0_process_interrupt, +}; + +static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->uvd.irq.num_types = 1; + adev->uvd.irq.funcs = &uvd_v5_0_irq_funcs; +} diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h new file mode 100644 index 000000000000..7d7a15296383 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h @@ -0,0 +1,29 @@ +/* 
+ * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __UVD_V5_0_H__ +#define __UVD_V5_0_H__ + +extern const struct amdgpu_ip_funcs uvd_v5_0_ip_funcs; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c new file mode 100644 index 000000000000..f59942d5c50e --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -0,0 +1,810 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Christian König + */ + +#include +#include +#include "amdgpu.h" +#include "amdgpu_uvd.h" +#include "vid.h" +#include "uvd/uvd_6_0_d.h" +#include "uvd/uvd_6_0_sh_mask.h" +#include "oss/oss_2_0_d.h" +#include "oss/oss_2_0_sh_mask.h" + +static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev); +static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev); +static int uvd_v6_0_start(struct amdgpu_device *adev); +static void uvd_v6_0_stop(struct amdgpu_device *adev); + +/** + * uvd_v6_0_ring_get_rptr - get read pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware read pointer + */ +static uint32_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + return RREG32(mmUVD_RBC_RB_RPTR); +} + +/** + * uvd_v6_0_ring_get_wptr - get write pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware write pointer + */ +static uint32_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + return RREG32(mmUVD_RBC_RB_WPTR); +} + +/** + * uvd_v6_0_ring_set_wptr - set write pointer + * + * @ring: amdgpu_ring pointer + * + * Commits the write pointer to the hardware + */ +static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + WREG32(mmUVD_RBC_RB_WPTR, ring->wptr); +} + +static int uvd_v6_0_early_init(struct amdgpu_device *adev) +{ + uvd_v6_0_set_ring_funcs(adev); + uvd_v6_0_set_irq_funcs(adev); + + return 0; +} + +static int uvd_v6_0_sw_init(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring; + int r; + + /* UVD TRAP */ + r = amdgpu_irq_add_id(adev, 124, &adev->uvd.irq); + if (r) + return r; + + r = amdgpu_uvd_sw_init(adev); + if (r) + return r; + + r = amdgpu_uvd_resume(adev); + if (r) + return r; + + ring = &adev->uvd.ring; + sprintf(ring->name, "uvd"); + r = amdgpu_ring_init(adev, ring, 4096, CP_PACKET2, 0xf, + &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD); + + return r; +} + +static int uvd_v6_0_sw_fini(struct amdgpu_device *adev) +{ + int r; + + r = amdgpu_uvd_suspend(adev); + if (r) + return r; + + r = amdgpu_uvd_sw_fini(adev); + if (r) + return r; + + return r; +} + +/** + * uvd_v6_0_hw_init - start and test UVD block + * + * @adev: amdgpu_device pointer + * + * Initialize the hardware, boot up the VCPU and do some testing + */ +static int uvd_v6_0_hw_init(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring = &adev->uvd.ring; + uint32_t tmp; + int r; + + r = uvd_v6_0_start(adev); + if (r) + goto done; + + ring->ready = true; + r = amdgpu_ring_test_ring(ring); + if (r) { + ring->ready = false; + goto done; + } + + r = amdgpu_ring_lock(ring, 10); + if (r) { + DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r); + goto done; + } + + tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0); + amdgpu_ring_write(ring, tmp); + amdgpu_ring_write(ring, 0xFFFFF); + + tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0); + amdgpu_ring_write(ring, tmp); + amdgpu_ring_write(ring, 0xFFFFF); + + tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0); + amdgpu_ring_write(ring, tmp); + amdgpu_ring_write(ring, 0xFFFFF); + + /* Clear timeout status bits */ + amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0)); + amdgpu_ring_write(ring, 0x8); + + amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0)); + amdgpu_ring_write(ring, 3); + + amdgpu_ring_unlock_commit(ring); + +done: + if (!r) + DRM_INFO("UVD initialized successfully.\n"); + + return r; +} + +/** + * uvd_v6_0_hw_fini 
- stop the hardware block + * + * @adev: amdgpu_device pointer + * + * Stop the UVD block, mark ring as not ready any more + */ +static int uvd_v6_0_hw_fini(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring = &adev->uvd.ring; + + uvd_v6_0_stop(adev); + ring->ready = false; + + return 0; +} + +static int uvd_v6_0_suspend(struct amdgpu_device *adev) +{ + int r; + + r = uvd_v6_0_hw_fini(adev); + if (r) + return r; + + r = amdgpu_uvd_suspend(adev); + if (r) + return r; + + return r; +} + +static int uvd_v6_0_resume(struct amdgpu_device *adev) +{ + int r; + + r = amdgpu_uvd_resume(adev); + if (r) + return r; + + r = uvd_v6_0_hw_init(adev); + if (r) + return r; + + return r; +} + +/** + * uvd_v6_0_mc_resume - memory controller programming + * + * @adev: amdgpu_device pointer + * + * Let the UVD memory controller know it's offsets + */ +static void uvd_v6_0_mc_resume(struct amdgpu_device *adev) +{ + uint64_t offset; + uint32_t size; + + /* programm memory controller bits 0-27 */ + WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, + lower_32_bits(adev->uvd.gpu_addr)); + WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, + upper_32_bits(adev->uvd.gpu_addr)); + + offset = AMDGPU_UVD_FIRMWARE_OFFSET; + size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4); + WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3); + WREG32(mmUVD_VCPU_CACHE_SIZE0, size); + + offset += size; + size = AMDGPU_UVD_STACK_SIZE; + WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3); + WREG32(mmUVD_VCPU_CACHE_SIZE1, size); + + offset += size; + size = AMDGPU_UVD_HEAP_SIZE; + WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3); + WREG32(mmUVD_VCPU_CACHE_SIZE2, size); +} + +/** + * uvd_v6_0_start - start UVD block + * + * @adev: amdgpu_device pointer + * + * Setup and start the UVD block + */ +static int uvd_v6_0_start(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring = &adev->uvd.ring; + uint32_t rb_bufsz, tmp; + uint32_t lmi_swap_cntl; + uint32_t mp_swap_cntl; + int i, j, r; + + /*disable DPG */ + WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2)); + + /* disable byte swapping */ + lmi_swap_cntl = 0; + mp_swap_cntl = 0; + + uvd_v6_0_mc_resume(adev); + + /* disable clock gating */ + WREG32(mmUVD_CGC_GATE, 0); + + /* disable interupt */ + WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1)); + + /* stall UMC and register bus before resetting VCPU */ + WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); + mdelay(1); + + /* put LMI, VCPU, RBC etc... 
into reset */ + WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK | + UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK | + UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK | + UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK | + UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK); + mdelay(5); + + /* take UVD block out of reset */ + WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK); + mdelay(5); + + /* initialize UVD memory controller */ + WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) | + (1 << 21) | (1 << 9) | (1 << 20)); + +#ifdef __BIG_ENDIAN + /* swap (8 in 32) RB and IB */ + lmi_swap_cntl = 0xa; + mp_swap_cntl = 0; +#endif + WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl); + WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl); + + WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040); + WREG32(mmUVD_MPC_SET_MUXA1, 0x0); + WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040); + WREG32(mmUVD_MPC_SET_MUXB1, 0x0); + WREG32(mmUVD_MPC_SET_ALU, 0); + WREG32(mmUVD_MPC_SET_MUX, 0x88); + + /* take all subblocks out of reset, except VCPU */ + WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); + mdelay(5); + + /* enable VCPU clock */ + WREG32(mmUVD_VCPU_CNTL, 1 << 9); + + /* enable UMC */ + WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8)); + + /* boot up the VCPU */ + WREG32(mmUVD_SOFT_RESET, 0); + mdelay(10); + + for (i = 0; i < 10; ++i) { + uint32_t status; + + for (j = 0; j < 100; ++j) { + status = RREG32(mmUVD_STATUS); + if (status & 2) + break; + mdelay(10); + } + r = 0; + if (status & 2) + break; + + DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n"); + WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK, + ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); + mdelay(10); + WREG32_P(mmUVD_SOFT_RESET, 0, + ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); + mdelay(10); + r = -1; + } + + if (r) { + DRM_ERROR("UVD not responding, giving up!!!\n"); + return r; + } + /* enable master interrupt */ + WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1)); + + /* clear the bit 4 of UVD_STATUS */ + WREG32_P(mmUVD_STATUS, 0, ~(2 << 1)); + + rb_bufsz = order_base_2(ring->ring_size); + tmp = 0; + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); + /* force RBC into idle state */ + WREG32(mmUVD_RBC_RB_CNTL, tmp); + + /* set the write pointer delay */ + WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0); + + /* set the wb address */ + WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2)); + + /* programm the RB_BASE for ring buffer */ + WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW, + lower_32_bits(ring->gpu_addr)); + WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH, + upper_32_bits(ring->gpu_addr)); + + /* Initialize the ring buffer's read and write pointers */ + WREG32(mmUVD_RBC_RB_RPTR, 0); + + ring->wptr = RREG32(mmUVD_RBC_RB_RPTR); + WREG32(mmUVD_RBC_RB_WPTR, ring->wptr); + + WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK); + + return 0; +} + +/** + * uvd_v6_0_stop - stop UVD block + * + * @adev: amdgpu_device pointer + * + * stop the UVD block + */ +static void uvd_v6_0_stop(struct amdgpu_device *adev) +{ + /* force RBC into idle state */ + WREG32(mmUVD_RBC_RB_CNTL, 0x11010101); + + /* Stall UMC and register bus before resetting 
VCPU */
+	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
+	mdelay(1);
+
+	/* put VCPU into reset */
+	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+	mdelay(5);
+
+	/* disable VCPU clock */
+	WREG32(mmUVD_VCPU_CNTL, 0x0);
+
+	/* Unstall UMC and register bus */
+	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
+}
+
+/**
+ * uvd_v6_0_ring_emit_fence - emit a fence & trap command
+ *
+ * @ring: amdgpu_ring pointer
+ * @addr: GPU address the fence sequence number is written to
+ * @seq: fence sequence number to emit
+ * @write64bit: unsupported here (triggers a WARN_ON)
+ *
+ * Write a fence and a trap command to the ring.
+ */
+static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
+				     bool write64bit)
+{
+	WARN_ON(write64bit);
+
+	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
+	amdgpu_ring_write(ring, seq);
+	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
+	amdgpu_ring_write(ring, addr & 0xffffffff);
+	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
+	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
+	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
+	amdgpu_ring_write(ring, 0);
+
+	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
+	amdgpu_ring_write(ring, 0);
+	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
+	amdgpu_ring_write(ring, 0);
+	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
+	amdgpu_ring_write(ring, 2);
+}
+
+/**
+ * uvd_v6_0_ring_emit_semaphore - emit semaphore command
+ *
+ * @ring: amdgpu_ring pointer
+ * @semaphore: semaphore to emit commands for
+ * @emit_wait: true if we should emit a wait command
+ *
+ * Emit a semaphore command (either wait or signal) to the UVD ring.
+ */
+static bool uvd_v6_0_ring_emit_semaphore(struct amdgpu_ring *ring,
+					 struct amdgpu_semaphore *semaphore,
+					 bool emit_wait)
+{
+	uint64_t addr = semaphore->gpu_addr;
+
+	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_LOW, 0));
+	amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF);
+
+	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_HIGH, 0));
+	amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF);
+
+	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CMD, 0));
+	amdgpu_ring_write(ring, 0x80 | (emit_wait ?
1 : 0)); + + return true; +} + +/** + * uvd_v6_0_ring_test_ring - register write test + * + * @ring: amdgpu_ring pointer + * + * Test if we can successfully write to the context register + */ +static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + uint32_t tmp = 0; + unsigned i; + int r; + + WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD); + r = amdgpu_ring_lock(ring, 3); + if (r) { + DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", + ring->idx, r); + return r; + } + amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0)); + amdgpu_ring_write(ring, 0xDEADBEEF); + amdgpu_ring_unlock_commit(ring); + for (i = 0; i < adev->usec_timeout; i++) { + tmp = RREG32(mmUVD_CONTEXT_ID); + if (tmp == 0xDEADBEEF) + break; + DRM_UDELAY(1); + } + + if (i < adev->usec_timeout) { + DRM_INFO("ring test on %d succeeded in %d usecs\n", + ring->idx, i); + } else { + DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", + ring->idx, tmp); + r = -EINVAL; + } + return r; +} + +/** + * uvd_v6_0_ring_emit_ib - execute indirect buffer + * + * @ring: amdgpu_ring pointer + * @ib: indirect buffer to execute + * + * Write ring commands to execute the indirect buffer + */ +static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring, + struct amdgpu_ib *ib) +{ + amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0)); + amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); + amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0)); + amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); + amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0)); + amdgpu_ring_write(ring, ib->length_dw); +} + +/** + * uvd_v6_0_ring_test_ib - test ib execution + * + * @ring: amdgpu_ring pointer + * + * Test if we can successfully execute an IB + */ +static int uvd_v6_0_ring_test_ib(struct amdgpu_ring *ring) +{ + struct amdgpu_fence *fence = NULL; + int r; + + r = amdgpu_uvd_get_create_msg(ring, 1, NULL); + if (r) { + DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r); + goto error; + } + + r = amdgpu_uvd_get_destroy_msg(ring, 1, &fence); + if (r) { + DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r); + goto error; + } + + r = amdgpu_fence_wait(fence, false); + if (r) { + DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); + goto error; + } + DRM_INFO("ib test on ring %d succeeded\n", ring->idx); +error: + amdgpu_fence_unref(&fence); + return r; +} + +static bool uvd_v6_0_is_idle(struct amdgpu_device *adev) +{ + return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK); +} + +static int uvd_v6_0_wait_for_idle(struct amdgpu_device *adev) +{ + unsigned i; + + for (i = 0; i < adev->usec_timeout; i++) { + if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK)) + return 0; + } + return -ETIMEDOUT; +} + +static int uvd_v6_0_soft_reset(struct amdgpu_device *adev) +{ + uvd_v6_0_stop(adev); + + WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK, + ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK); + mdelay(5); + + return uvd_v6_0_start(adev); +} + +static void uvd_v6_0_print_status(struct amdgpu_device *adev) +{ + dev_info(adev->dev, "UVD 6.0 registers\n"); + dev_info(adev->dev, " UVD_SEMA_ADDR_LOW=0x%08X\n", + RREG32(mmUVD_SEMA_ADDR_LOW)); + dev_info(adev->dev, " UVD_SEMA_ADDR_HIGH=0x%08X\n", + RREG32(mmUVD_SEMA_ADDR_HIGH)); + dev_info(adev->dev, " UVD_SEMA_CMD=0x%08X\n", + RREG32(mmUVD_SEMA_CMD)); + dev_info(adev->dev, " UVD_GPCOM_VCPU_CMD=0x%08X\n", + RREG32(mmUVD_GPCOM_VCPU_CMD)); + dev_info(adev->dev, " UVD_GPCOM_VCPU_DATA0=0x%08X\n", + 
RREG32(mmUVD_GPCOM_VCPU_DATA0)); + dev_info(adev->dev, " UVD_GPCOM_VCPU_DATA1=0x%08X\n", + RREG32(mmUVD_GPCOM_VCPU_DATA1)); + dev_info(adev->dev, " UVD_ENGINE_CNTL=0x%08X\n", + RREG32(mmUVD_ENGINE_CNTL)); + dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n", + RREG32(mmUVD_UDEC_ADDR_CONFIG)); + dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n", + RREG32(mmUVD_UDEC_DB_ADDR_CONFIG)); + dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n", + RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG)); + dev_info(adev->dev, " UVD_SEMA_CNTL=0x%08X\n", + RREG32(mmUVD_SEMA_CNTL)); + dev_info(adev->dev, " UVD_LMI_EXT40_ADDR=0x%08X\n", + RREG32(mmUVD_LMI_EXT40_ADDR)); + dev_info(adev->dev, " UVD_CTX_INDEX=0x%08X\n", + RREG32(mmUVD_CTX_INDEX)); + dev_info(adev->dev, " UVD_CTX_DATA=0x%08X\n", + RREG32(mmUVD_CTX_DATA)); + dev_info(adev->dev, " UVD_CGC_GATE=0x%08X\n", + RREG32(mmUVD_CGC_GATE)); + dev_info(adev->dev, " UVD_CGC_CTRL=0x%08X\n", + RREG32(mmUVD_CGC_CTRL)); + dev_info(adev->dev, " UVD_LMI_CTRL2=0x%08X\n", + RREG32(mmUVD_LMI_CTRL2)); + dev_info(adev->dev, " UVD_MASTINT_EN=0x%08X\n", + RREG32(mmUVD_MASTINT_EN)); + dev_info(adev->dev, " UVD_LMI_ADDR_EXT=0x%08X\n", + RREG32(mmUVD_LMI_ADDR_EXT)); + dev_info(adev->dev, " UVD_LMI_CTRL=0x%08X\n", + RREG32(mmUVD_LMI_CTRL)); + dev_info(adev->dev, " UVD_LMI_SWAP_CNTL=0x%08X\n", + RREG32(mmUVD_LMI_SWAP_CNTL)); + dev_info(adev->dev, " UVD_MP_SWAP_CNTL=0x%08X\n", + RREG32(mmUVD_MP_SWAP_CNTL)); + dev_info(adev->dev, " UVD_MPC_SET_MUXA0=0x%08X\n", + RREG32(mmUVD_MPC_SET_MUXA0)); + dev_info(adev->dev, " UVD_MPC_SET_MUXA1=0x%08X\n", + RREG32(mmUVD_MPC_SET_MUXA1)); + dev_info(adev->dev, " UVD_MPC_SET_MUXB0=0x%08X\n", + RREG32(mmUVD_MPC_SET_MUXB0)); + dev_info(adev->dev, " UVD_MPC_SET_MUXB1=0x%08X\n", + RREG32(mmUVD_MPC_SET_MUXB1)); + dev_info(adev->dev, " UVD_MPC_SET_MUX=0x%08X\n", + RREG32(mmUVD_MPC_SET_MUX)); + dev_info(adev->dev, " UVD_MPC_SET_ALU=0x%08X\n", + RREG32(mmUVD_MPC_SET_ALU)); + dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET0=0x%08X\n", + RREG32(mmUVD_VCPU_CACHE_OFFSET0)); + dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE0=0x%08X\n", + RREG32(mmUVD_VCPU_CACHE_SIZE0)); + dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET1=0x%08X\n", + RREG32(mmUVD_VCPU_CACHE_OFFSET1)); + dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE1=0x%08X\n", + RREG32(mmUVD_VCPU_CACHE_SIZE1)); + dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET2=0x%08X\n", + RREG32(mmUVD_VCPU_CACHE_OFFSET2)); + dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE2=0x%08X\n", + RREG32(mmUVD_VCPU_CACHE_SIZE2)); + dev_info(adev->dev, " UVD_VCPU_CNTL=0x%08X\n", + RREG32(mmUVD_VCPU_CNTL)); + dev_info(adev->dev, " UVD_SOFT_RESET=0x%08X\n", + RREG32(mmUVD_SOFT_RESET)); + dev_info(adev->dev, " UVD_RBC_IB_SIZE=0x%08X\n", + RREG32(mmUVD_RBC_IB_SIZE)); + dev_info(adev->dev, " UVD_RBC_RB_RPTR=0x%08X\n", + RREG32(mmUVD_RBC_RB_RPTR)); + dev_info(adev->dev, " UVD_RBC_RB_WPTR=0x%08X\n", + RREG32(mmUVD_RBC_RB_WPTR)); + dev_info(adev->dev, " UVD_RBC_RB_WPTR_CNTL=0x%08X\n", + RREG32(mmUVD_RBC_RB_WPTR_CNTL)); + dev_info(adev->dev, " UVD_RBC_RB_CNTL=0x%08X\n", + RREG32(mmUVD_RBC_RB_CNTL)); + dev_info(adev->dev, " UVD_STATUS=0x%08X\n", + RREG32(mmUVD_STATUS)); + dev_info(adev->dev, " UVD_SEMA_TIMEOUT_STATUS=0x%08X\n", + RREG32(mmUVD_SEMA_TIMEOUT_STATUS)); + dev_info(adev->dev, " UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n", + RREG32(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL)); + dev_info(adev->dev, " UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL=0x%08X\n", + RREG32(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL)); + dev_info(adev->dev, " UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n", 
+ RREG32(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL)); + dev_info(adev->dev, " UVD_CONTEXT_ID=0x%08X\n", + RREG32(mmUVD_CONTEXT_ID)); +} + +static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned type, + enum amdgpu_interrupt_state state) +{ + // TODO + return 0; +} + +static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + DRM_DEBUG("IH: UVD TRAP\n"); + amdgpu_fence_process(&adev->uvd.ring); + return 0; +} + +static int uvd_v6_0_set_clockgating_state(struct amdgpu_device *adev, + enum amdgpu_clockgating_state state) +{ + //TODO + + return 0; +} + +static int uvd_v6_0_set_powergating_state(struct amdgpu_device *adev, + enum amdgpu_powergating_state state) +{ + /* This doesn't actually powergate the UVD block. + * That's done in the dpm code via the SMC. This + * just re-inits the block as necessary. The actual + * gating still happens in the dpm code. We should + * revisit this when there is a cleaner line between + * the smc and the hw blocks + */ + if (state == AMDGPU_PG_STATE_GATE) { + uvd_v6_0_stop(adev); + return 0; + } else { + return uvd_v6_0_start(adev); + } +} + +const struct amdgpu_ip_funcs uvd_v6_0_ip_funcs = { + .early_init = uvd_v6_0_early_init, + .late_init = NULL, + .sw_init = uvd_v6_0_sw_init, + .sw_fini = uvd_v6_0_sw_fini, + .hw_init = uvd_v6_0_hw_init, + .hw_fini = uvd_v6_0_hw_fini, + .suspend = uvd_v6_0_suspend, + .resume = uvd_v6_0_resume, + .is_idle = uvd_v6_0_is_idle, + .wait_for_idle = uvd_v6_0_wait_for_idle, + .soft_reset = uvd_v6_0_soft_reset, + .print_status = uvd_v6_0_print_status, + .set_clockgating_state = uvd_v6_0_set_clockgating_state, + .set_powergating_state = uvd_v6_0_set_powergating_state, +}; + +static const struct amdgpu_ring_funcs uvd_v6_0_ring_funcs = { + .get_rptr = uvd_v6_0_ring_get_rptr, + .get_wptr = uvd_v6_0_ring_get_wptr, + .set_wptr = uvd_v6_0_ring_set_wptr, + .parse_cs = amdgpu_uvd_ring_parse_cs, + .emit_ib = uvd_v6_0_ring_emit_ib, + .emit_fence = uvd_v6_0_ring_emit_fence, + .emit_semaphore = uvd_v6_0_ring_emit_semaphore, + .test_ring = uvd_v6_0_ring_test_ring, + .test_ib = uvd_v6_0_ring_test_ib, + .is_lockup = amdgpu_ring_test_lockup, +}; + +static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev) +{ + adev->uvd.ring.funcs = &uvd_v6_0_ring_funcs; +} + +static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = { + .set = uvd_v6_0_set_interrupt_state, + .process = uvd_v6_0_process_interrupt, +}; + +static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->uvd.irq.num_types = 1; + adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs; +} diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h new file mode 100644 index 000000000000..bc21afc8abac --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h @@ -0,0 +1,29 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __UVD_V6_0_H__ +#define __UVD_V6_0_H__ + +extern const struct amdgpu_ip_funcs uvd_v6_0_ip_funcs; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c new file mode 100644 index 000000000000..384c45e74053 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -0,0 +1,521 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ * + * Authors: Christian König + */ + +#include +#include +#include "amdgpu.h" +#include "amdgpu_vce.h" +#include "vid.h" +#include "vce/vce_3_0_d.h" +#include "vce/vce_3_0_sh_mask.h" +#include "oss/oss_2_0_d.h" +#include "oss/oss_2_0_sh_mask.h" + +static void vce_v3_0_mc_resume(struct amdgpu_device *adev); +static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev); +static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev); + +/** + * vce_v3_0_ring_get_rptr - get read pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware read pointer + */ +static uint32_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring == &adev->vce.ring[0]) + return RREG32(mmVCE_RB_RPTR); + else + return RREG32(mmVCE_RB_RPTR2); +} + +/** + * vce_v3_0_ring_get_wptr - get write pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware write pointer + */ +static uint32_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring == &adev->vce.ring[0]) + return RREG32(mmVCE_RB_WPTR); + else + return RREG32(mmVCE_RB_WPTR2); +} + +/** + * vce_v3_0_ring_set_wptr - set write pointer + * + * @ring: amdgpu_ring pointer + * + * Commits the write pointer to the hardware + */ +static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring == &adev->vce.ring[0]) + WREG32(mmVCE_RB_WPTR, ring->wptr); + else + WREG32(mmVCE_RB_WPTR2, ring->wptr); +} + +/** + * vce_v3_0_start - start VCE block + * + * @adev: amdgpu_device pointer + * + * Setup and start the VCE block + */ +static int vce_v3_0_start(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring; + int i, j, r; + + vce_v3_0_mc_resume(adev); + + /* set BUSY flag */ + WREG32_P(mmVCE_STATUS, 1, ~1); + + ring = &adev->vce.ring[0]; + WREG32(mmVCE_RB_RPTR, ring->wptr); + WREG32(mmVCE_RB_WPTR, ring->wptr); + WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr); + WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); + WREG32(mmVCE_RB_SIZE, ring->ring_size / 4); + + ring = &adev->vce.ring[1]; + WREG32(mmVCE_RB_RPTR2, ring->wptr); + WREG32(mmVCE_RB_WPTR2, ring->wptr); + WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr); + WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); + WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4); + + WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK); + + WREG32_P(mmVCE_SOFT_RESET, + VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, + ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + + mdelay(100); + + WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + + for (i = 0; i < 10; ++i) { + uint32_t status; + for (j = 0; j < 100; ++j) { + status = RREG32(mmVCE_STATUS); + if (status & 2) + break; + mdelay(10); + } + r = 0; + if (status & 2) + break; + + DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n"); + WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, + ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + mdelay(10); + WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + mdelay(10); + r = -1; + } + + /* clear BUSY flag */ + WREG32_P(mmVCE_STATUS, 0, ~1); + + if (r) { + DRM_ERROR("VCE not responding, giving up!!!\n"); + return r; + } + + return 0; +} + +static int vce_v3_0_early_init(struct amdgpu_device *adev) +{ + vce_v3_0_set_ring_funcs(adev); + vce_v3_0_set_irq_funcs(adev); + + return 0; +} + +static int vce_v3_0_sw_init(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring; + int r; + 
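+	/* both VCE rings share a single trap interrupt source (id 167,
+	 * registered below); vce_v3_0_process_interrupt() later uses
+	 * entry->src_data to tell ring 0 and ring 1 apart
+	 */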
+ /* VCE */ + r = amdgpu_irq_add_id(adev, 167, &adev->vce.irq); + if (r) + return r; + + r = amdgpu_vce_sw_init(adev); + if (r) + return r; + + r = amdgpu_vce_resume(adev); + if (r) + return r; + + ring = &adev->vce.ring[0]; + sprintf(ring->name, "vce0"); + r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf, + &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE); + if (r) + return r; + + ring = &adev->vce.ring[1]; + sprintf(ring->name, "vce1"); + r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf, + &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE); + if (r) + return r; + + return r; +} + +static int vce_v3_0_sw_fini(struct amdgpu_device *adev) +{ + int r; + + r = amdgpu_vce_suspend(adev); + if (r) + return r; + + r = amdgpu_vce_sw_fini(adev); + if (r) + return r; + + return r; +} + +static int vce_v3_0_hw_init(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring; + int r; + + r = vce_v3_0_start(adev); + if (r) + return r; + + ring = &adev->vce.ring[0]; + ring->ready = true; + r = amdgpu_ring_test_ring(ring); + if (r) { + ring->ready = false; + return r; + } + + ring = &adev->vce.ring[1]; + ring->ready = true; + r = amdgpu_ring_test_ring(ring); + if (r) { + ring->ready = false; + return r; + } + + DRM_INFO("VCE initialized successfully.\n"); + + return 0; +} + +static int vce_v3_0_hw_fini(struct amdgpu_device *adev) +{ + // TODO + return 0; +} + +static int vce_v3_0_suspend(struct amdgpu_device *adev) +{ + int r; + + r = vce_v3_0_hw_fini(adev); + if (r) + return r; + + r = amdgpu_vce_suspend(adev); + if (r) + return r; + + return r; +} + +static int vce_v3_0_resume(struct amdgpu_device *adev) +{ + int r; + + r = amdgpu_vce_resume(adev); + if (r) + return r; + + r = vce_v3_0_hw_init(adev); + if (r) + return r; + + return r; +} + +static void vce_v3_0_mc_resume(struct amdgpu_device *adev) +{ + uint32_t offset, size; + + WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16)); + WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000); + WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F); + WREG32(mmVCE_CLOCK_GATING_B, 0xf7); + + WREG32(mmVCE_LMI_CTRL, 0x00398000); + WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1); + WREG32(mmVCE_LMI_SWAP_CNTL, 0); + WREG32(mmVCE_LMI_SWAP_CNTL1, 0); + WREG32(mmVCE_LMI_VM_CTRL, 0); + + WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8)); + offset = AMDGPU_VCE_FIRMWARE_OFFSET; + size = AMDGPU_GPU_PAGE_ALIGN(adev->vce.fw->size); + WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff); + WREG32(mmVCE_VCPU_CACHE_SIZE0, size); + + offset += size; + size = AMDGPU_VCE_STACK_SIZE; + WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff); + WREG32(mmVCE_VCPU_CACHE_SIZE1, size); + + offset += size; + size = AMDGPU_VCE_HEAP_SIZE; + WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff); + WREG32(mmVCE_VCPU_CACHE_SIZE2, size); + + WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100); + + WREG32_P(mmVCE_SYS_INT_EN, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK, + ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK); +} + +static bool vce_v3_0_is_idle(struct amdgpu_device *adev) +{ + return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK); +} + +static int vce_v3_0_wait_for_idle(struct amdgpu_device *adev) +{ + unsigned i; + + for (i = 0; i < adev->usec_timeout; i++) { + if (!(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK)) + return 0; + } + return -ETIMEDOUT; +} + +static int vce_v3_0_soft_reset(struct amdgpu_device *adev) +{ + WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK, + ~SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK); + mdelay(5); + + return 
vce_v3_0_start(adev); +} + +static void vce_v3_0_print_status(struct amdgpu_device *adev) +{ + dev_info(adev->dev, "VCE 3.0 registers\n"); + dev_info(adev->dev, " VCE_STATUS=0x%08X\n", + RREG32(mmVCE_STATUS)); + dev_info(adev->dev, " VCE_VCPU_CNTL=0x%08X\n", + RREG32(mmVCE_VCPU_CNTL)); + dev_info(adev->dev, " VCE_VCPU_CACHE_OFFSET0=0x%08X\n", + RREG32(mmVCE_VCPU_CACHE_OFFSET0)); + dev_info(adev->dev, " VCE_VCPU_CACHE_SIZE0=0x%08X\n", + RREG32(mmVCE_VCPU_CACHE_SIZE0)); + dev_info(adev->dev, " VCE_VCPU_CACHE_OFFSET1=0x%08X\n", + RREG32(mmVCE_VCPU_CACHE_OFFSET1)); + dev_info(adev->dev, " VCE_VCPU_CACHE_SIZE1=0x%08X\n", + RREG32(mmVCE_VCPU_CACHE_SIZE1)); + dev_info(adev->dev, " VCE_VCPU_CACHE_OFFSET2=0x%08X\n", + RREG32(mmVCE_VCPU_CACHE_OFFSET2)); + dev_info(adev->dev, " VCE_VCPU_CACHE_SIZE2=0x%08X\n", + RREG32(mmVCE_VCPU_CACHE_SIZE2)); + dev_info(adev->dev, " VCE_SOFT_RESET=0x%08X\n", + RREG32(mmVCE_SOFT_RESET)); + dev_info(adev->dev, " VCE_RB_BASE_LO2=0x%08X\n", + RREG32(mmVCE_RB_BASE_LO2)); + dev_info(adev->dev, " VCE_RB_BASE_HI2=0x%08X\n", + RREG32(mmVCE_RB_BASE_HI2)); + dev_info(adev->dev, " VCE_RB_SIZE2=0x%08X\n", + RREG32(mmVCE_RB_SIZE2)); + dev_info(adev->dev, " VCE_RB_RPTR2=0x%08X\n", + RREG32(mmVCE_RB_RPTR2)); + dev_info(adev->dev, " VCE_RB_WPTR2=0x%08X\n", + RREG32(mmVCE_RB_WPTR2)); + dev_info(adev->dev, " VCE_RB_BASE_LO=0x%08X\n", + RREG32(mmVCE_RB_BASE_LO)); + dev_info(adev->dev, " VCE_RB_BASE_HI=0x%08X\n", + RREG32(mmVCE_RB_BASE_HI)); + dev_info(adev->dev, " VCE_RB_SIZE=0x%08X\n", + RREG32(mmVCE_RB_SIZE)); + dev_info(adev->dev, " VCE_RB_RPTR=0x%08X\n", + RREG32(mmVCE_RB_RPTR)); + dev_info(adev->dev, " VCE_RB_WPTR=0x%08X\n", + RREG32(mmVCE_RB_WPTR)); + dev_info(adev->dev, " VCE_CLOCK_GATING_A=0x%08X\n", + RREG32(mmVCE_CLOCK_GATING_A)); + dev_info(adev->dev, " VCE_CLOCK_GATING_B=0x%08X\n", + RREG32(mmVCE_CLOCK_GATING_B)); + dev_info(adev->dev, " VCE_UENC_CLOCK_GATING=0x%08X\n", + RREG32(mmVCE_UENC_CLOCK_GATING)); + dev_info(adev->dev, " VCE_UENC_REG_CLOCK_GATING=0x%08X\n", + RREG32(mmVCE_UENC_REG_CLOCK_GATING)); + dev_info(adev->dev, " VCE_SYS_INT_EN=0x%08X\n", + RREG32(mmVCE_SYS_INT_EN)); + dev_info(adev->dev, " VCE_LMI_CTRL2=0x%08X\n", + RREG32(mmVCE_LMI_CTRL2)); + dev_info(adev->dev, " VCE_LMI_CTRL=0x%08X\n", + RREG32(mmVCE_LMI_CTRL)); + dev_info(adev->dev, " VCE_LMI_VM_CTRL=0x%08X\n", + RREG32(mmVCE_LMI_VM_CTRL)); + dev_info(adev->dev, " VCE_LMI_SWAP_CNTL=0x%08X\n", + RREG32(mmVCE_LMI_SWAP_CNTL)); + dev_info(adev->dev, " VCE_LMI_SWAP_CNTL1=0x%08X\n", + RREG32(mmVCE_LMI_SWAP_CNTL1)); + dev_info(adev->dev, " VCE_LMI_CACHE_CTRL=0x%08X\n", + RREG32(mmVCE_LMI_CACHE_CTRL)); +} + +static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned type, + enum amdgpu_interrupt_state state) +{ + uint32_t val = 0; + + if (state == AMDGPU_IRQ_STATE_ENABLE) + val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK; + + WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK); + return 0; +} + +static int vce_v3_0_process_interrupt(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + DRM_DEBUG("IH: VCE\n"); + switch (entry->src_data) { + case 0: + amdgpu_fence_process(&adev->vce.ring[0]); + break; + case 1: + amdgpu_fence_process(&adev->vce.ring[1]); + break; + default: + DRM_ERROR("Unhandled interrupt: %d %d\n", + entry->src_id, entry->src_data); + break; + } + + return 0; +} + +static int vce_v3_0_set_clockgating_state(struct amdgpu_device *adev, + enum 
amdgpu_clockgating_state state) +{ + //TODO + return 0; +} + +static int vce_v3_0_set_powergating_state(struct amdgpu_device *adev, + enum amdgpu_powergating_state state) +{ + /* This doesn't actually powergate the VCE block. + * That's done in the dpm code via the SMC. This + * just re-inits the block as necessary. The actual + * gating still happens in the dpm code. We should + * revisit this when there is a cleaner line between + * the smc and the hw blocks + */ + if (state == AMDGPU_PG_STATE_GATE) + /* XXX do we need a vce_v3_0_stop()? */ + return 0; + else + return vce_v3_0_start(adev); +} + +const struct amdgpu_ip_funcs vce_v3_0_ip_funcs = { + .early_init = vce_v3_0_early_init, + .late_init = NULL, + .sw_init = vce_v3_0_sw_init, + .sw_fini = vce_v3_0_sw_fini, + .hw_init = vce_v3_0_hw_init, + .hw_fini = vce_v3_0_hw_fini, + .suspend = vce_v3_0_suspend, + .resume = vce_v3_0_resume, + .is_idle = vce_v3_0_is_idle, + .wait_for_idle = vce_v3_0_wait_for_idle, + .soft_reset = vce_v3_0_soft_reset, + .print_status = vce_v3_0_print_status, + .set_clockgating_state = vce_v3_0_set_clockgating_state, + .set_powergating_state = vce_v3_0_set_powergating_state, +}; + +static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = { + .get_rptr = vce_v3_0_ring_get_rptr, + .get_wptr = vce_v3_0_ring_get_wptr, + .set_wptr = vce_v3_0_ring_set_wptr, + .parse_cs = amdgpu_vce_ring_parse_cs, + .emit_ib = amdgpu_vce_ring_emit_ib, + .emit_fence = amdgpu_vce_ring_emit_fence, + .emit_semaphore = amdgpu_vce_ring_emit_semaphore, + .test_ring = amdgpu_vce_ring_test_ring, + .test_ib = amdgpu_vce_ring_test_ib, + .is_lockup = amdgpu_ring_test_lockup, +}; + +static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev) +{ + adev->vce.ring[0].funcs = &vce_v3_0_ring_funcs; + adev->vce.ring[1].funcs = &vce_v3_0_ring_funcs; +} + +static const struct amdgpu_irq_src_funcs vce_v3_0_irq_funcs = { + .set = vce_v3_0_set_interrupt_state, + .process = vce_v3_0_process_interrupt, +}; + +static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->vce.irq.num_types = 1; + adev->vce.irq.funcs = &vce_v3_0_irq_funcs; +}; diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h new file mode 100644 index 000000000000..f3c2ba92a1f1 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h @@ -0,0 +1,29 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __VCE_V3_0_H__ +#define __VCE_V3_0_H__ + +extern const struct amdgpu_ip_funcs vce_v3_0_ip_funcs; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c new file mode 100644 index 000000000000..20a159803983 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -0,0 +1,1373 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include +#include +#include +#include "drmP.h" +#include "amdgpu.h" +#include "amdgpu_atombios.h" +#include "amdgpu_ih.h" +#include "amdgpu_uvd.h" +#include "amdgpu_vce.h" +#include "amdgpu_ucode.h" +#include "atom.h" + +#include "gmc/gmc_8_1_d.h" +#include "gmc/gmc_8_1_sh_mask.h" + +#include "oss/oss_3_0_d.h" +#include "oss/oss_3_0_sh_mask.h" + +#include "bif/bif_5_0_d.h" +#include "bif/bif_5_0_sh_mask.h" + +#include "gca/gfx_8_0_d.h" +#include "gca/gfx_8_0_sh_mask.h" + +#include "smu/smu_7_1_1_d.h" +#include "smu/smu_7_1_1_sh_mask.h" + +#include "uvd/uvd_5_0_d.h" +#include "uvd/uvd_5_0_sh_mask.h" + +#include "vce/vce_3_0_d.h" +#include "vce/vce_3_0_sh_mask.h" + +#include "dce/dce_10_0_d.h" +#include "dce/dce_10_0_sh_mask.h" + +#include "vid.h" +#include "vi.h" +#include "vi_dpm.h" +#include "gmc_v8_0.h" +#include "gfx_v8_0.h" +#include "sdma_v2_4.h" +#include "sdma_v3_0.h" +#include "dce_v10_0.h" +#include "dce_v11_0.h" +#include "iceland_ih.h" +#include "tonga_ih.h" +#include "cz_ih.h" +#include "uvd_v5_0.h" +#include "uvd_v6_0.h" +#include "vce_v3_0.h" + +/* + * Indirect registers accessor + */ +static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg) +{ + unsigned long flags; + u32 r; + + spin_lock_irqsave(&adev->pcie_idx_lock, flags); + WREG32(mmPCIE_INDEX, reg); + (void)RREG32(mmPCIE_INDEX); + r = RREG32(mmPCIE_DATA); + spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); + return r; +} + +static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v) +{ + unsigned long flags; + + spin_lock_irqsave(&adev->pcie_idx_lock, flags); + WREG32(mmPCIE_INDEX, reg); + (void)RREG32(mmPCIE_INDEX); + WREG32(mmPCIE_DATA, v); + (void)RREG32(mmPCIE_DATA); + spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); +} + +static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg) +{ + unsigned long flags; + u32 r; + + spin_lock_irqsave(&adev->smc_idx_lock, flags); + WREG32(mmSMC_IND_INDEX_0, (reg)); + r = RREG32(mmSMC_IND_DATA_0); + spin_unlock_irqrestore(&adev->smc_idx_lock, flags); + return r; +} + +static void vi_smc_wreg(struct 
amdgpu_device *adev, u32 reg, u32 v) +{ + unsigned long flags; + + spin_lock_irqsave(&adev->smc_idx_lock, flags); + WREG32(mmSMC_IND_INDEX_0, (reg)); + WREG32(mmSMC_IND_DATA_0, (v)); + spin_unlock_irqrestore(&adev->smc_idx_lock, flags); +} + +static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg) +{ + unsigned long flags; + u32 r; + + spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags); + WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff)); + r = RREG32(mmUVD_CTX_DATA); + spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags); + return r; +} + +static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v) +{ + unsigned long flags; + + spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags); + WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff)); + WREG32(mmUVD_CTX_DATA, (v)); + spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags); +} + +static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg) +{ + unsigned long flags; + u32 r; + + spin_lock_irqsave(&adev->didt_idx_lock, flags); + WREG32(mmDIDT_IND_INDEX, (reg)); + r = RREG32(mmDIDT_IND_DATA); + spin_unlock_irqrestore(&adev->didt_idx_lock, flags); + return r; +} + +static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v) +{ + unsigned long flags; + + spin_lock_irqsave(&adev->didt_idx_lock, flags); + WREG32(mmDIDT_IND_INDEX, (reg)); + WREG32(mmDIDT_IND_DATA, (v)); + spin_unlock_irqrestore(&adev->didt_idx_lock, flags); +} + +static const u32 tonga_mgcg_cgcg_init[] = +{ + mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100, + mmPCIE_INDEX, 0xffffffff, 0x0140001c, + mmPCIE_DATA, 0x000f0000, 0x00000000, + mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C, + mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100, + mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100, + mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000, + mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100, + mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104, +}; + +static const u32 iceland_mgcg_cgcg_init[] = +{ + mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2, + mmPCIE_DATA, 0x000f0000, 0x00000000, + mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0, + mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100, + mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104, +}; + +static const u32 cz_mgcg_cgcg_init[] = +{ + mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100, + mmPCIE_INDEX, 0xffffffff, 0x0140001c, + mmPCIE_DATA, 0x000f0000, 0x00000000, + mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100, + mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000, + mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100, + mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104, +}; + +static void vi_init_golden_registers(struct amdgpu_device *adev) +{ + /* Some of the registers might be dependent on GRBM_GFX_INDEX */ + mutex_lock(&adev->grbm_idx_mutex); + + switch (adev->asic_type) { + case CHIP_TOPAZ: + amdgpu_program_register_sequence(adev, + iceland_mgcg_cgcg_init, + (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init)); + break; + case CHIP_TONGA: + amdgpu_program_register_sequence(adev, + tonga_mgcg_cgcg_init, + (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init)); + break; + case CHIP_CARRIZO: + amdgpu_program_register_sequence(adev, + cz_mgcg_cgcg_init, + (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init)); + break; + default: + break; + } + mutex_unlock(&adev->grbm_idx_mutex); +} + +/** + * vi_get_xclk - get the xclk + * + * @adev: amdgpu_device pointer + * + * Returns the reference clock used by the gfx engine + * (VI). 
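+ *
+ * On APUs this is simply the SPLL reference frequency.  On dGPUs the
+ * SMC clock-pin registers are consulted first: if TCLK is muxed onto
+ * XCLK a fixed value is returned, and if the crystal input is divided
+ * the reference frequency is scaled down accordingly (see the body
+ * below).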
+ */ +static u32 vi_get_xclk(struct amdgpu_device *adev) +{ + u32 reference_clock = adev->clock.spll.reference_freq; + u32 tmp; + + if (adev->flags & AMDGPU_IS_APU) + return reference_clock; + + tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2); + if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK)) + return 1000; + + tmp = RREG32_SMC(ixCG_CLKPIN_CNTL); + if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE)) + return reference_clock / 4; + + return reference_clock; +} + +/** + * vi_srbm_select - select specific register instances + * + * @adev: amdgpu_device pointer + * @me: selected ME (micro engine) + * @pipe: pipe + * @queue: queue + * @vmid: VMID + * + * Switches the currently active registers instances. Some + * registers are instanced per VMID, others are instanced per + * me/pipe/queue combination. + */ +void vi_srbm_select(struct amdgpu_device *adev, + u32 me, u32 pipe, u32 queue, u32 vmid) +{ + u32 srbm_gfx_cntl = 0; + srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe); + srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me); + srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid); + srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue); + WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl); +} + +static void vi_vga_set_state(struct amdgpu_device *adev, bool state) +{ + /* todo */ +} + +static bool vi_read_disabled_bios(struct amdgpu_device *adev) +{ + u32 bus_cntl; + u32 d1vga_control = 0; + u32 d2vga_control = 0; + u32 vga_render_control = 0; + u32 rom_cntl; + bool r; + + bus_cntl = RREG32(mmBUS_CNTL); + if (adev->mode_info.num_crtc) { + d1vga_control = RREG32(mmD1VGA_CONTROL); + d2vga_control = RREG32(mmD2VGA_CONTROL); + vga_render_control = RREG32(mmVGA_RENDER_CONTROL); + } + rom_cntl = RREG32_SMC(ixROM_CNTL); + + /* enable the rom */ + WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK)); + if (adev->mode_info.num_crtc) { + /* Disable VGA mode */ + WREG32(mmD1VGA_CONTROL, + (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK | + D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK))); + WREG32(mmD2VGA_CONTROL, + (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK | + D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK))); + WREG32(mmVGA_RENDER_CONTROL, + (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK)); + } + WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK); + + r = amdgpu_read_bios(adev); + + /* restore regs */ + WREG32(mmBUS_CNTL, bus_cntl); + if (adev->mode_info.num_crtc) { + WREG32(mmD1VGA_CONTROL, d1vga_control); + WREG32(mmD2VGA_CONTROL, d2vga_control); + WREG32(mmVGA_RENDER_CONTROL, vga_render_control); + } + WREG32_SMC(ixROM_CNTL, rom_cntl); + return r; +} +static struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = { + {mmGB_MACROTILE_MODE7, true}, +}; + +static struct amdgpu_allowed_register_entry cz_allowed_read_registers[] = { + {mmGB_TILE_MODE7, true}, + {mmGB_TILE_MODE12, true}, + {mmGB_TILE_MODE17, true}, + {mmGB_TILE_MODE23, true}, + {mmGB_MACROTILE_MODE7, true}, +}; + +static struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = { + {mmGRBM_STATUS, false}, + {mmGB_ADDR_CONFIG, false}, + {mmMC_ARB_RAMCFG, false}, + {mmGB_TILE_MODE0, false}, + {mmGB_TILE_MODE1, false}, + {mmGB_TILE_MODE2, false}, + {mmGB_TILE_MODE3, false}, + {mmGB_TILE_MODE4, false}, + {mmGB_TILE_MODE5, false}, + {mmGB_TILE_MODE6, false}, + {mmGB_TILE_MODE7, false}, + {mmGB_TILE_MODE8, false}, + {mmGB_TILE_MODE9, false}, + {mmGB_TILE_MODE10, false}, + {mmGB_TILE_MODE11, false}, 
+ {mmGB_TILE_MODE12, false}, + {mmGB_TILE_MODE13, false}, + {mmGB_TILE_MODE14, false}, + {mmGB_TILE_MODE15, false}, + {mmGB_TILE_MODE16, false}, + {mmGB_TILE_MODE17, false}, + {mmGB_TILE_MODE18, false}, + {mmGB_TILE_MODE19, false}, + {mmGB_TILE_MODE20, false}, + {mmGB_TILE_MODE21, false}, + {mmGB_TILE_MODE22, false}, + {mmGB_TILE_MODE23, false}, + {mmGB_TILE_MODE24, false}, + {mmGB_TILE_MODE25, false}, + {mmGB_TILE_MODE26, false}, + {mmGB_TILE_MODE27, false}, + {mmGB_TILE_MODE28, false}, + {mmGB_TILE_MODE29, false}, + {mmGB_TILE_MODE30, false}, + {mmGB_TILE_MODE31, false}, + {mmGB_MACROTILE_MODE0, false}, + {mmGB_MACROTILE_MODE1, false}, + {mmGB_MACROTILE_MODE2, false}, + {mmGB_MACROTILE_MODE3, false}, + {mmGB_MACROTILE_MODE4, false}, + {mmGB_MACROTILE_MODE5, false}, + {mmGB_MACROTILE_MODE6, false}, + {mmGB_MACROTILE_MODE7, false}, + {mmGB_MACROTILE_MODE8, false}, + {mmGB_MACROTILE_MODE9, false}, + {mmGB_MACROTILE_MODE10, false}, + {mmGB_MACROTILE_MODE11, false}, + {mmGB_MACROTILE_MODE12, false}, + {mmGB_MACROTILE_MODE13, false}, + {mmGB_MACROTILE_MODE14, false}, + {mmGB_MACROTILE_MODE15, false}, + {mmCC_RB_BACKEND_DISABLE, false, true}, + {mmGC_USER_RB_BACKEND_DISABLE, false, true}, + {mmGB_BACKEND_MAP, false, false}, + {mmPA_SC_RASTER_CONFIG, false, true}, + {mmPA_SC_RASTER_CONFIG_1, false, true}, +}; + +static uint32_t vi_read_indexed_register(struct amdgpu_device *adev, u32 se_num, + u32 sh_num, u32 reg_offset) +{ + uint32_t val; + + mutex_lock(&adev->grbm_idx_mutex); + if (se_num != 0xffffffff || sh_num != 0xffffffff) + gfx_v8_0_select_se_sh(adev, se_num, sh_num); + + val = RREG32(reg_offset); + + if (se_num != 0xffffffff || sh_num != 0xffffffff) + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); + return val; +} + +static int vi_read_register(struct amdgpu_device *adev, u32 se_num, + u32 sh_num, u32 reg_offset, u32 *value) +{ + struct amdgpu_allowed_register_entry *asic_register_table = NULL; + struct amdgpu_allowed_register_entry *asic_register_entry; + uint32_t size, i; + + *value = 0; + switch (adev->asic_type) { + case CHIP_TOPAZ: + asic_register_table = tonga_allowed_read_registers; + size = ARRAY_SIZE(tonga_allowed_read_registers); + break; + case CHIP_TONGA: + case CHIP_CARRIZO: + asic_register_table = cz_allowed_read_registers; + size = ARRAY_SIZE(cz_allowed_read_registers); + break; + default: + return -EINVAL; + } + + if (asic_register_table) { + for (i = 0; i < size; i++) { + asic_register_entry = asic_register_table + i; + if (reg_offset != asic_register_entry->reg_offset) + continue; + if (!asic_register_entry->untouched) + *value = asic_register_entry->grbm_indexed ? + vi_read_indexed_register(adev, se_num, + sh_num, reg_offset) : + RREG32(reg_offset); + return 0; + } + } + + for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) { + if (reg_offset != vi_allowed_read_registers[i].reg_offset) + continue; + + if (!vi_allowed_read_registers[i].untouched) + *value = vi_allowed_read_registers[i].grbm_indexed ? 
+ vi_read_indexed_register(adev, se_num, + sh_num, reg_offset) : + RREG32(reg_offset); + return 0; + } + return -EINVAL; +} + +static void vi_print_gpu_status_regs(struct amdgpu_device *adev) +{ + dev_info(adev->dev, " GRBM_STATUS=0x%08X\n", + RREG32(mmGRBM_STATUS)); + dev_info(adev->dev, " GRBM_STATUS2=0x%08X\n", + RREG32(mmGRBM_STATUS2)); + dev_info(adev->dev, " GRBM_STATUS_SE0=0x%08X\n", + RREG32(mmGRBM_STATUS_SE0)); + dev_info(adev->dev, " GRBM_STATUS_SE1=0x%08X\n", + RREG32(mmGRBM_STATUS_SE1)); + dev_info(adev->dev, " GRBM_STATUS_SE2=0x%08X\n", + RREG32(mmGRBM_STATUS_SE2)); + dev_info(adev->dev, " GRBM_STATUS_SE3=0x%08X\n", + RREG32(mmGRBM_STATUS_SE3)); + dev_info(adev->dev, " SRBM_STATUS=0x%08X\n", + RREG32(mmSRBM_STATUS)); + dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", + RREG32(mmSRBM_STATUS2)); + dev_info(adev->dev, " SDMA0_STATUS_REG = 0x%08X\n", + RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET)); + dev_info(adev->dev, " SDMA1_STATUS_REG = 0x%08X\n", + RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET)); + dev_info(adev->dev, " CP_STAT = 0x%08x\n", RREG32(mmCP_STAT)); + dev_info(adev->dev, " CP_STALLED_STAT1 = 0x%08x\n", + RREG32(mmCP_STALLED_STAT1)); + dev_info(adev->dev, " CP_STALLED_STAT2 = 0x%08x\n", + RREG32(mmCP_STALLED_STAT2)); + dev_info(adev->dev, " CP_STALLED_STAT3 = 0x%08x\n", + RREG32(mmCP_STALLED_STAT3)); + dev_info(adev->dev, " CP_CPF_BUSY_STAT = 0x%08x\n", + RREG32(mmCP_CPF_BUSY_STAT)); + dev_info(adev->dev, " CP_CPF_STALLED_STAT1 = 0x%08x\n", + RREG32(mmCP_CPF_STALLED_STAT1)); + dev_info(adev->dev, " CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS)); + dev_info(adev->dev, " CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPC_BUSY_STAT)); + dev_info(adev->dev, " CP_CPC_STALLED_STAT1 = 0x%08x\n", + RREG32(mmCP_CPC_STALLED_STAT1)); + dev_info(adev->dev, " CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS)); +} + +/** + * vi_gpu_check_soft_reset - check which blocks are busy + * + * @adev: amdgpu_device pointer + * + * Check which blocks are busy and return the relevant reset + * mask to be used by vi_gpu_soft_reset(). + * Returns a mask of the blocks to be reset. 
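+ *
+ * Busy bits found in GRBM_STATUS*, SRBM_STATUS* and the SDMA status
+ * registers are translated into AMDGPU_RESET_* flags; a busy MC is
+ * deliberately dropped from the mask since it is most likely just
+ * busy rather than hung.
+ *
+ * Typical pairing with vi_gpu_soft_reset(), as done by vi_asic_reset()
+ * below (illustrative sketch only):
+ *
+ *   reset_mask = vi_gpu_check_soft_reset(adev);
+ *   if (reset_mask)
+ *           vi_gpu_soft_reset(adev, reset_mask);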
+ */ +u32 vi_gpu_check_soft_reset(struct amdgpu_device *adev) +{ + u32 reset_mask = 0; + u32 tmp; + + /* GRBM_STATUS */ + tmp = RREG32(mmGRBM_STATUS); + if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK | + GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK | + GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK | + GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK | + GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK | + GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) + reset_mask |= AMDGPU_RESET_GFX; + + if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) + reset_mask |= AMDGPU_RESET_CP; + + /* GRBM_STATUS2 */ + tmp = RREG32(mmGRBM_STATUS2); + if (tmp & GRBM_STATUS2__RLC_BUSY_MASK) + reset_mask |= AMDGPU_RESET_RLC; + + if (tmp & (GRBM_STATUS2__CPF_BUSY_MASK | + GRBM_STATUS2__CPC_BUSY_MASK | + GRBM_STATUS2__CPG_BUSY_MASK)) + reset_mask |= AMDGPU_RESET_CP; + + /* SRBM_STATUS2 */ + tmp = RREG32(mmSRBM_STATUS2); + if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) + reset_mask |= AMDGPU_RESET_DMA; + + if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) + reset_mask |= AMDGPU_RESET_DMA1; + + /* SRBM_STATUS */ + tmp = RREG32(mmSRBM_STATUS); + + if (tmp & SRBM_STATUS__IH_BUSY_MASK) + reset_mask |= AMDGPU_RESET_IH; + + if (tmp & SRBM_STATUS__SEM_BUSY_MASK) + reset_mask |= AMDGPU_RESET_SEM; + + if (tmp & SRBM_STATUS__GRBM_RQ_PENDING_MASK) + reset_mask |= AMDGPU_RESET_GRBM; + + if (adev->asic_type != CHIP_TOPAZ) { + if (tmp & (SRBM_STATUS__UVD_RQ_PENDING_MASK | + SRBM_STATUS__UVD_BUSY_MASK)) + reset_mask |= AMDGPU_RESET_UVD; + } + + if (tmp & SRBM_STATUS__VMC_BUSY_MASK) + reset_mask |= AMDGPU_RESET_VMC; + + if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | + SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) + reset_mask |= AMDGPU_RESET_MC; + + /* SDMA0_STATUS_REG */ + tmp = RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET); + if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK)) + reset_mask |= AMDGPU_RESET_DMA; + + /* SDMA1_STATUS_REG */ + tmp = RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET); + if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK)) + reset_mask |= AMDGPU_RESET_DMA1; +#if 0 + /* VCE_STATUS */ + if (adev->asic_type != CHIP_TOPAZ) { + tmp = RREG32(mmVCE_STATUS); + if (tmp & VCE_STATUS__VCPU_REPORT_RB0_BUSY_MASK) + reset_mask |= AMDGPU_RESET_VCE; + if (tmp & VCE_STATUS__VCPU_REPORT_RB1_BUSY_MASK) + reset_mask |= AMDGPU_RESET_VCE1; + + } + + if (adev->asic_type != CHIP_TOPAZ) { + if (amdgpu_display_is_display_hung(adev)) + reset_mask |= AMDGPU_RESET_DISPLAY; + } +#endif + + /* Skip MC reset as it's mostly likely not hung, just busy */ + if (reset_mask & AMDGPU_RESET_MC) { + DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask); + reset_mask &= ~AMDGPU_RESET_MC; + } + + return reset_mask; +} + +/** + * vi_gpu_soft_reset - soft reset GPU + * + * @adev: amdgpu_device pointer + * @reset_mask: mask of which blocks to reset + * + * Soft reset the blocks specified in @reset_mask. 
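+ *
+ * The sequence below halts the CP/MEC front ends and any SDMA engine
+ * selected in @reset_mask, stops MC access, pulses the requested
+ * GRBM_SOFT_RESET/SRBM_SOFT_RESET bits with short delays, and then
+ * resumes the MC and dumps the status registers again.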
+ */ +static void vi_gpu_soft_reset(struct amdgpu_device *adev, u32 reset_mask) +{ + struct amdgpu_mode_mc_save save; + u32 grbm_soft_reset = 0, srbm_soft_reset = 0; + u32 tmp; + + if (reset_mask == 0) + return; + + dev_info(adev->dev, "GPU softreset: 0x%08X\n", reset_mask); + + vi_print_gpu_status_regs(adev); + dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", + RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR)); + dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", + RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS)); + + /* disable CG/PG */ + + /* stop the rlc */ + //XXX + //gfx_v8_0_rlc_stop(adev); + + /* Disable GFX parsing/prefetching */ + tmp = RREG32(mmCP_ME_CNTL); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1); + WREG32(mmCP_ME_CNTL, tmp); + + /* Disable MEC parsing/prefetching */ + tmp = RREG32(mmCP_MEC_CNTL); + tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1); + tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1); + WREG32(mmCP_MEC_CNTL, tmp); + + if (reset_mask & AMDGPU_RESET_DMA) { + /* sdma0 */ + tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET); + tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1); + WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp); + } + if (reset_mask & AMDGPU_RESET_DMA1) { + /* sdma1 */ + tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET); + tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1); + WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp); + } + + gmc_v8_0_mc_stop(adev, &save); + if (amdgpu_asic_wait_for_mc_idle(adev)) { + dev_warn(adev->dev, "Wait for MC idle timedout !\n"); + } + + if (reset_mask & (AMDGPU_RESET_GFX | AMDGPU_RESET_COMPUTE | AMDGPU_RESET_CP)) { + grbm_soft_reset = + REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP, 1); + grbm_soft_reset = + REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX, 1); + } + + if (reset_mask & AMDGPU_RESET_CP) { + grbm_soft_reset = + REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP, 1); + srbm_soft_reset = + REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1); + } + + if (reset_mask & AMDGPU_RESET_DMA) + srbm_soft_reset = + REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA, 1); + + if (reset_mask & AMDGPU_RESET_DMA1) + srbm_soft_reset = + REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1, 1); + + if (reset_mask & AMDGPU_RESET_DISPLAY) + srbm_soft_reset = + REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_DC, 1); + + if (reset_mask & AMDGPU_RESET_RLC) + grbm_soft_reset = + REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); + + if (reset_mask & AMDGPU_RESET_SEM) + srbm_soft_reset = + REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SEM, 1); + + if (reset_mask & AMDGPU_RESET_IH) + srbm_soft_reset = + REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_IH, 1); + + if (reset_mask & AMDGPU_RESET_GRBM) + srbm_soft_reset = + REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1); + + if (reset_mask & AMDGPU_RESET_VMC) + srbm_soft_reset = + REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VMC, 1); + + if (reset_mask & AMDGPU_RESET_UVD) + srbm_soft_reset = + REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1); + + if (reset_mask & AMDGPU_RESET_VCE) + srbm_soft_reset = + REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1); + + if (reset_mask & AMDGPU_RESET_VCE) + srbm_soft_reset = + 
REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); + + if (!(adev->flags & AMDGPU_IS_APU)) { + if (reset_mask & AMDGPU_RESET_MC) + srbm_soft_reset = + REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_MC, 1); + } + + if (grbm_soft_reset) { + tmp = RREG32(mmGRBM_SOFT_RESET); + tmp |= grbm_soft_reset; + dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32(mmGRBM_SOFT_RESET, tmp); + tmp = RREG32(mmGRBM_SOFT_RESET); + + udelay(50); + + tmp &= ~grbm_soft_reset; + WREG32(mmGRBM_SOFT_RESET, tmp); + tmp = RREG32(mmGRBM_SOFT_RESET); + } + + if (srbm_soft_reset) { + tmp = RREG32(mmSRBM_SOFT_RESET); + tmp |= srbm_soft_reset; + dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + udelay(50); + + tmp &= ~srbm_soft_reset; + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + } + + /* Wait a little for things to settle down */ + udelay(50); + + gmc_v8_0_mc_resume(adev, &save); + udelay(50); + + vi_print_gpu_status_regs(adev); +} + +static void vi_gpu_pci_config_reset(struct amdgpu_device *adev) +{ + struct amdgpu_mode_mc_save save; + u32 tmp, i; + + dev_info(adev->dev, "GPU pci config reset\n"); + + /* disable dpm? */ + + /* disable cg/pg */ + + /* Disable GFX parsing/prefetching */ + tmp = RREG32(mmCP_ME_CNTL); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1); + WREG32(mmCP_ME_CNTL, tmp); + + /* Disable MEC parsing/prefetching */ + tmp = RREG32(mmCP_MEC_CNTL); + tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1); + tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1); + WREG32(mmCP_MEC_CNTL, tmp); + + /* Disable GFX parsing/prefetching */ + WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK | + CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK); + + /* Disable MEC parsing/prefetching */ + WREG32(mmCP_MEC_CNTL, + CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK); + + /* sdma0 */ + tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET); + tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1); + WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp); + + /* sdma1 */ + tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET); + tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1); + WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp); + + /* XXX other engines? */ + + /* halt the rlc, disable cp internal ints */ + //XXX + //gfx_v8_0_rlc_stop(adev); + + udelay(50); + + /* disable mem access */ + gmc_v8_0_mc_stop(adev, &save); + if (amdgpu_asic_wait_for_mc_idle(adev)) { + dev_warn(adev->dev, "Wait for MC idle timed out !\n"); + } + + /* disable BM */ + pci_clear_master(adev->pdev); + /* reset */ + amdgpu_pci_config_reset(adev); + + udelay(100); + + /* wait for asic to come out of reset */ + for (i = 0; i < adev->usec_timeout; i++) { + if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) + break; + udelay(1); + } + +} + +static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung) +{ + u32 tmp = RREG32(mmBIOS_SCRATCH_3); + + if (hung) + tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG; + else + tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG; + + WREG32(mmBIOS_SCRATCH_3, tmp); +} + +/** + * vi_asic_reset - soft reset GPU + * + * @adev: amdgpu_device pointer + * + * Look up which blocks are hung and attempt + * to reset them. + * Returns 0 for success. 
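+ *
+ * A soft reset is attempted first; if blocks are still hung and
+ * amdgpu_hard_reset is set, a PCI config reset is used as a fallback.
+ * This is reached through the asic callback table (see vi_asic_funcs
+ * below), e.g. (illustration only):
+ *
+ *   adev->asic_funcs->reset(adev);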
+ */
+static int vi_asic_reset(struct amdgpu_device *adev)
+{
+	u32 reset_mask;
+
+	reset_mask = vi_gpu_check_soft_reset(adev);
+
+	if (reset_mask)
+		vi_set_bios_scratch_engine_hung(adev, true);
+
+	/* try soft reset */
+	vi_gpu_soft_reset(adev, reset_mask);
+
+	reset_mask = vi_gpu_check_soft_reset(adev);
+
+	/* try pci config reset */
+	if (reset_mask && amdgpu_hard_reset)
+		vi_gpu_pci_config_reset(adev);
+
+	reset_mask = vi_gpu_check_soft_reset(adev);
+
+	if (!reset_mask)
+		vi_set_bios_scratch_engine_hung(adev, false);
+
+	return 0;
+}
+
+static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
+			u32 cntl_reg, u32 status_reg)
+{
+	int r, i;
+	struct atom_clock_dividers dividers;
+	uint32_t tmp;
+
+	r = amdgpu_atombios_get_clock_dividers(adev,
+					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+					       clock, false, &dividers);
+	if (r)
+		return r;
+
+	tmp = RREG32_SMC(cntl_reg);
+	tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
+		CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
+	tmp |= dividers.post_divider;
+	WREG32_SMC(cntl_reg, tmp);
+
+	for (i = 0; i < 100; i++) {
+		if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK)
+			break;
+		mdelay(10);
+	}
+	if (i == 100)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
+{
+	int r;
+
+	r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
+	if (r)
+		return r;
+
+	r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
+
+	return 0;
+}
+
+static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
+{
+	/* todo */
+
+	return 0;
+}
+
+static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
+{
+	u32 mask;
+	int ret;
+
+	if (amdgpu_pcie_gen2 == 0)
+		return;
+
+	if (adev->flags & AMDGPU_IS_APU)
+		return;
+
+	ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
+	if (ret != 0)
+		return;
+
+	if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
+		return;
+
+	/* todo */
+}
+
+static void vi_program_aspm(struct amdgpu_device *adev)
+{
+
+	if (amdgpu_aspm == 0)
+		return;
+
+	/* todo */
+}
+
+static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
+					bool enable)
+{
+	u32 tmp;
+
+	/* not necessary on CZ */
+	if (adev->flags & AMDGPU_IS_APU)
+		return;
+
+	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
+	if (enable)
+		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
+	else
+		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);
+
+	WREG32(mmBIF_DOORBELL_APER_EN, tmp);
+}
+
+/* topaz has no DCE, UVD, VCE */
+static const struct amdgpu_ip_block_version topaz_ip_blocks[] =
+{
+	/* ORDER MATTERS! */
+	{
+		.type = AMDGPU_IP_BLOCK_TYPE_COMMON,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &vi_common_ip_funcs,
+	},
+	{
+		.type = AMDGPU_IP_BLOCK_TYPE_GMC,
+		.major = 8,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &gmc_v8_0_ip_funcs,
+	},
+	{
+		.type = AMDGPU_IP_BLOCK_TYPE_IH,
+		.major = 2,
+		.minor = 4,
+		.rev = 0,
+		.funcs = &iceland_ih_ip_funcs,
+	},
+	{
+		.type = AMDGPU_IP_BLOCK_TYPE_SMC,
+		.major = 7,
+		.minor = 1,
+		.rev = 0,
+		.funcs = &iceland_dpm_ip_funcs,
+	},
+	{
+		.type = AMDGPU_IP_BLOCK_TYPE_GFX,
+		.major = 8,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &gfx_v8_0_ip_funcs,
+	},
+	{
+		.type = AMDGPU_IP_BLOCK_TYPE_SDMA,
+		.major = 2,
+		.minor = 4,
+		.rev = 0,
+		.funcs = &sdma_v2_4_ip_funcs,
+	},
+};
+
+static const struct amdgpu_ip_block_version tonga_ip_blocks[] =
+{
+	/* ORDER MATTERS!
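+	 * The core driver brings the blocks up in array order, so
+	 * dependencies must come first: common and GMC, then IH and SMC,
+	 * then DCE, GFX and SDMA, and finally UVD/VCE.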
*/ + { + .type = AMDGPU_IP_BLOCK_TYPE_COMMON, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vi_common_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_GMC, + .major = 8, + .minor = 0, + .rev = 0, + .funcs = &gmc_v8_0_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_IH, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &tonga_ih_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_SMC, + .major = 7, + .minor = 1, + .rev = 0, + .funcs = &tonga_dpm_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_DCE, + .major = 10, + .minor = 0, + .rev = 0, + .funcs = &dce_v10_0_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_GFX, + .major = 8, + .minor = 0, + .rev = 0, + .funcs = &gfx_v8_0_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_SDMA, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &sdma_v3_0_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_UVD, + .major = 5, + .minor = 0, + .rev = 0, + .funcs = &uvd_v5_0_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_VCE, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &vce_v3_0_ip_funcs, + }, +}; + +static const struct amdgpu_ip_block_version cz_ip_blocks[] = +{ + /* ORDER MATTERS! */ + { + .type = AMDGPU_IP_BLOCK_TYPE_COMMON, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vi_common_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_GMC, + .major = 8, + .minor = 0, + .rev = 0, + .funcs = &gmc_v8_0_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_IH, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &cz_ih_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_SMC, + .major = 8, + .minor = 0, + .rev = 0, + .funcs = &cz_dpm_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_DCE, + .major = 11, + .minor = 0, + .rev = 0, + .funcs = &dce_v11_0_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_GFX, + .major = 8, + .minor = 0, + .rev = 0, + .funcs = &gfx_v8_0_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_SDMA, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &sdma_v3_0_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_UVD, + .major = 6, + .minor = 0, + .rev = 0, + .funcs = &uvd_v6_0_ip_funcs, + }, + { + .type = AMDGPU_IP_BLOCK_TYPE_VCE, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &vce_v3_0_ip_funcs, + }, +}; + +int vi_set_ip_blocks(struct amdgpu_device *adev) +{ + switch (adev->asic_type) { + case CHIP_TOPAZ: + adev->ip_blocks = topaz_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks); + break; + case CHIP_TONGA: + adev->ip_blocks = tonga_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks); + break; + case CHIP_CARRIZO: + adev->ip_blocks = cz_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks); + break; + default: + /* FIXME: not supported yet */ + return -EINVAL; + } + + adev->ip_block_enabled = kcalloc(adev->num_ip_blocks, sizeof(bool), GFP_KERNEL); + if (adev->ip_block_enabled == NULL) + return -ENOMEM; + + return 0; +} + +static uint32_t vi_get_rev_id(struct amdgpu_device *adev) +{ + if (adev->asic_type == CHIP_TOPAZ) + return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK) + >> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT; + else + return (RREG32(mmCC_DRM_ID_STRAPS) & CC_DRM_ID_STRAPS__ATI_REV_ID_MASK) + >> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT; +} + +static const struct amdgpu_asic_funcs vi_asic_funcs = +{ + .read_disabled_bios = &vi_read_disabled_bios, + .read_register = &vi_read_register, + .reset = &vi_asic_reset, + .set_vga_state = &vi_vga_set_state, + .get_xclk = &vi_get_xclk, + .set_uvd_clocks = &vi_set_uvd_clocks, + .set_vce_clocks = &vi_set_vce_clocks, + .get_cu_info = 
&gfx_v8_0_get_cu_info, + /* these should be moved to their own ip modules */ + .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter, + .wait_for_mc_idle = &gmc_v8_0_mc_wait_for_idle, +}; + +static int vi_common_early_init(struct amdgpu_device *adev) +{ + bool smc_enabled = false; + + adev->smc_rreg = &vi_smc_rreg; + adev->smc_wreg = &vi_smc_wreg; + adev->pcie_rreg = &vi_pcie_rreg; + adev->pcie_wreg = &vi_pcie_wreg; + adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg; + adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg; + adev->didt_rreg = &vi_didt_rreg; + adev->didt_wreg = &vi_didt_wreg; + + adev->asic_funcs = &vi_asic_funcs; + + if (amdgpu_get_ip_block(adev, AMDGPU_IP_BLOCK_TYPE_SMC) && + (amdgpu_ip_block_mask & (1 << AMDGPU_IP_BLOCK_TYPE_SMC))) + smc_enabled = true; + + adev->rev_id = vi_get_rev_id(adev); + adev->external_rev_id = 0xFF; + switch (adev->asic_type) { + case CHIP_TOPAZ: + adev->has_uvd = false; + adev->cg_flags = 0; + adev->pg_flags = 0; + adev->external_rev_id = 0x1; + if (amdgpu_smc_load_fw && smc_enabled) + adev->firmware.smu_load = true; + break; + case CHIP_TONGA: + adev->has_uvd = true; + adev->cg_flags = 0; + adev->pg_flags = 0; + adev->external_rev_id = adev->rev_id + 0x14; + if (amdgpu_smc_load_fw && smc_enabled) + adev->firmware.smu_load = true; + break; + case CHIP_CARRIZO: + adev->has_uvd = true; + adev->cg_flags = 0; + adev->pg_flags = 0; + adev->external_rev_id = adev->rev_id + 0x1; + if (amdgpu_smc_load_fw && smc_enabled) + adev->firmware.smu_load = true; + break; + default: + /* FIXME: not supported yet */ + return -EINVAL; + } + + return 0; +} + +static int vi_common_sw_init(struct amdgpu_device *adev) +{ + return 0; +} + +static int vi_common_sw_fini(struct amdgpu_device *adev) +{ + return 0; +} + +static int vi_common_hw_init(struct amdgpu_device *adev) +{ + /* move the golden regs per IP block */ + vi_init_golden_registers(adev); + /* enable pcie gen2/3 link */ + vi_pcie_gen3_enable(adev); + /* enable aspm */ + vi_program_aspm(adev); + /* enable the doorbell aperture */ + vi_enable_doorbell_aperture(adev, true); + + return 0; +} + +static int vi_common_hw_fini(struct amdgpu_device *adev) +{ + /* enable the doorbell aperture */ + vi_enable_doorbell_aperture(adev, false); + + return 0; +} + +static int vi_common_suspend(struct amdgpu_device *adev) +{ + return vi_common_hw_fini(adev); +} + +static int vi_common_resume(struct amdgpu_device *adev) +{ + return vi_common_hw_init(adev); +} + +static bool vi_common_is_idle(struct amdgpu_device *adev) +{ + return true; +} + +static int vi_common_wait_for_idle(struct amdgpu_device *adev) +{ + return 0; +} + +static void vi_common_print_status(struct amdgpu_device *adev) +{ + +} + +static int vi_common_soft_reset(struct amdgpu_device *adev) +{ + /* XXX hard reset?? 
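+	 * Nothing to do here yet; chip-level reset is handled by
+	 * vi_asic_reset() above rather than by the common IP block.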
*/ + return 0; +} + +static int vi_common_set_clockgating_state(struct amdgpu_device *adev, + enum amdgpu_clockgating_state state) +{ + return 0; +} + +static int vi_common_set_powergating_state(struct amdgpu_device *adev, + enum amdgpu_powergating_state state) +{ + return 0; +} + +const struct amdgpu_ip_funcs vi_common_ip_funcs = { + .early_init = vi_common_early_init, + .late_init = NULL, + .sw_init = vi_common_sw_init, + .sw_fini = vi_common_sw_fini, + .hw_init = vi_common_hw_init, + .hw_fini = vi_common_hw_fini, + .suspend = vi_common_suspend, + .resume = vi_common_resume, + .is_idle = vi_common_is_idle, + .wait_for_idle = vi_common_wait_for_idle, + .soft_reset = vi_common_soft_reset, + .print_status = vi_common_print_status, + .set_clockgating_state = vi_common_set_clockgating_state, + .set_powergating_state = vi_common_set_powergating_state, +}; + diff --git a/drivers/gpu/drm/amd/amdgpu/vi.h b/drivers/gpu/drm/amd/amdgpu/vi.h new file mode 100644 index 000000000000..d16a5f7e4edd --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/vi.h @@ -0,0 +1,33 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __VI_H__ +#define __VI_H__ + +extern const struct amdgpu_ip_funcs vi_common_ip_funcs; + +void vi_srbm_select(struct amdgpu_device *adev, + u32 me, u32 pipe, u32 queue, u32 vmid); +int vi_set_ip_blocks(struct amdgpu_device *adev); + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/vi_dpm.h b/drivers/gpu/drm/amd/amdgpu/vi_dpm.h new file mode 100644 index 000000000000..11cb1f7eeba5 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/vi_dpm.h @@ -0,0 +1,36 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __VI_DPM_H__ +#define __VI_DPM_H__ + +extern const struct amdgpu_ip_funcs cz_dpm_ip_funcs; +int cz_smu_init(struct amdgpu_device *adev); +int cz_smu_start(struct amdgpu_device *adev); +int cz_smu_fini(struct amdgpu_device *adev); + +extern const struct amdgpu_ip_funcs tonga_dpm_ip_funcs; + +extern const struct amdgpu_ip_funcs iceland_dpm_ip_funcs; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h new file mode 100644 index 000000000000..385267c31d11 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/vid.h @@ -0,0 +1,363 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef VI_H +#define VI_H + +#define SDMA0_REGISTER_OFFSET 0x0 /* not a register */ +#define SDMA1_REGISTER_OFFSET 0x200 /* not a register */ +#define SDMA_MAX_INSTANCE 2 + +/* crtc instance offsets */ +#define CRTC0_REGISTER_OFFSET (0x1b9c - 0x1b9c) +#define CRTC1_REGISTER_OFFSET (0x1d9c - 0x1b9c) +#define CRTC2_REGISTER_OFFSET (0x1f9c - 0x1b9c) +#define CRTC3_REGISTER_OFFSET (0x419c - 0x1b9c) +#define CRTC4_REGISTER_OFFSET (0x439c - 0x1b9c) +#define CRTC5_REGISTER_OFFSET (0x459c - 0x1b9c) +#define CRTC6_REGISTER_OFFSET (0x479c - 0x1b9c) + +/* dig instance offsets */ +#define DIG0_REGISTER_OFFSET (0x4a00 - 0x4a00) +#define DIG1_REGISTER_OFFSET (0x4b00 - 0x4a00) +#define DIG2_REGISTER_OFFSET (0x4c00 - 0x4a00) +#define DIG3_REGISTER_OFFSET (0x4d00 - 0x4a00) +#define DIG4_REGISTER_OFFSET (0x4e00 - 0x4a00) +#define DIG5_REGISTER_OFFSET (0x4f00 - 0x4a00) +#define DIG6_REGISTER_OFFSET (0x5400 - 0x4a00) +#define DIG7_REGISTER_OFFSET (0x5600 - 0x4a00) +#define DIG8_REGISTER_OFFSET (0x5700 - 0x4a00) + +/* audio endpt instance offsets */ +#define AUD0_REGISTER_OFFSET (0x17a8 - 0x17a8) +#define AUD1_REGISTER_OFFSET (0x17ac - 0x17a8) +#define AUD2_REGISTER_OFFSET (0x17b0 - 0x17a8) +#define AUD3_REGISTER_OFFSET (0x17b4 - 0x17a8) +#define AUD4_REGISTER_OFFSET (0x17b8 - 0x17a8) +#define AUD5_REGISTER_OFFSET (0x17bc - 0x17a8) +#define AUD6_REGISTER_OFFSET (0x17c4 - 0x17a8) + +/* hpd instance offsets */ +#define HPD0_REGISTER_OFFSET (0x1898 - 0x1898) +#define HPD1_REGISTER_OFFSET (0x18a0 - 0x1898) +#define HPD2_REGISTER_OFFSET (0x18a8 - 0x1898) +#define HPD3_REGISTER_OFFSET (0x18b0 - 0x1898) +#define HPD4_REGISTER_OFFSET (0x18b8 - 0x1898) +#define HPD5_REGISTER_OFFSET (0x18c0 - 0x1898) + +#define AMDGPU_NUM_OF_VMIDS 8 + +#define RB_BITMAP_WIDTH_PER_SH 2 + +#define MC_SEQ_MISC0__GDDR5__SHIFT 0x1c +#define MC_SEQ_MISC0__GDDR5_MASK 0xf0000000 +#define MC_SEQ_MISC0__GDDR5_VALUE 5 + +/* + * PM4 + */ +#define PACKET_TYPE0 0 +#define PACKET_TYPE1 1 +#define PACKET_TYPE2 2 +#define PACKET_TYPE3 3 + +#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3) +#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF) +#define CP_PACKET0_GET_REG(h) ((h) & 0xFFFF) +#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF) +#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \ + ((reg) & 0xFFFF) | \ + ((n) & 0x3FFF) << 16) +#define CP_PACKET2 0x80000000 +#define PACKET2_PAD_SHIFT 0 +#define PACKET2_PAD_MASK (0x3fffffff << 0) + +#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v))) + +#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \ + (((op) & 0xFF) << 8) | \ + ((n) & 0x3FFF) << 16) + +#define PACKET3_COMPUTE(op, n) (PACKET3(op, n) | 1 << 1) + +/* Packet 3 types */ +#define PACKET3_NOP 0x10 +#define PACKET3_SET_BASE 0x11 +#define PACKET3_BASE_INDEX(x) ((x) << 0) +#define CE_PARTITION_BASE 3 +#define PACKET3_CLEAR_STATE 0x12 +#define PACKET3_INDEX_BUFFER_SIZE 0x13 +#define PACKET3_DISPATCH_DIRECT 0x15 +#define PACKET3_DISPATCH_INDIRECT 0x16 +#define PACKET3_ATOMIC_GDS 0x1D +#define PACKET3_ATOMIC_MEM 0x1E +#define PACKET3_OCCLUSION_QUERY 0x1F +#define PACKET3_SET_PREDICATION 0x20 +#define PACKET3_REG_RMW 0x21 +#define PACKET3_COND_EXEC 0x22 +#define PACKET3_PRED_EXEC 0x23 +#define PACKET3_DRAW_INDIRECT 0x24 +#define PACKET3_DRAW_INDEX_INDIRECT 0x25 +#define PACKET3_INDEX_BASE 0x26 +#define PACKET3_DRAW_INDEX_2 0x27 +#define PACKET3_CONTEXT_CONTROL 0x28 +#define PACKET3_INDEX_TYPE 0x2A +#define PACKET3_DRAW_INDIRECT_MULTI 0x2C +#define PACKET3_DRAW_INDEX_AUTO 0x2D +#define PACKET3_NUM_INSTANCES 0x2F +#define 
PACKET3_DRAW_INDEX_MULTI_AUTO 0x30 +#define PACKET3_INDIRECT_BUFFER_CONST 0x33 +#define PACKET3_STRMOUT_BUFFER_UPDATE 0x34 +#define PACKET3_DRAW_INDEX_OFFSET_2 0x35 +#define PACKET3_DRAW_PREAMBLE 0x36 +#define PACKET3_WRITE_DATA 0x37 +#define WRITE_DATA_DST_SEL(x) ((x) << 8) + /* 0 - register + * 1 - memory (sync - via GRBM) + * 2 - gl2 + * 3 - gds + * 4 - reserved + * 5 - memory (async - direct) + */ +#define WR_ONE_ADDR (1 << 16) +#define WR_CONFIRM (1 << 20) +#define WRITE_DATA_CACHE_POLICY(x) ((x) << 25) + /* 0 - LRU + * 1 - Stream + */ +#define WRITE_DATA_ENGINE_SEL(x) ((x) << 30) + /* 0 - me + * 1 - pfp + * 2 - ce + */ +#define PACKET3_DRAW_INDEX_INDIRECT_MULTI 0x38 +#define PACKET3_MEM_SEMAPHORE 0x39 +# define PACKET3_SEM_USE_MAILBOX (0x1 << 16) +# define PACKET3_SEM_SEL_SIGNAL_TYPE (0x1 << 20) /* 0 = increment, 1 = write 1 */ +# define PACKET3_SEM_CLIENT_CODE ((x) << 24) /* 0 = CP, 1 = CB, 2 = DB */ +# define PACKET3_SEM_SEL_SIGNAL (0x6 << 29) +# define PACKET3_SEM_SEL_WAIT (0x7 << 29) +#define PACKET3_WAIT_REG_MEM 0x3C +#define WAIT_REG_MEM_FUNCTION(x) ((x) << 0) + /* 0 - always + * 1 - < + * 2 - <= + * 3 - == + * 4 - != + * 5 - >= + * 6 - > + */ +#define WAIT_REG_MEM_MEM_SPACE(x) ((x) << 4) + /* 0 - reg + * 1 - mem + */ +#define WAIT_REG_MEM_OPERATION(x) ((x) << 6) + /* 0 - wait_reg_mem + * 1 - wr_wait_wr_reg + */ +#define WAIT_REG_MEM_ENGINE(x) ((x) << 8) + /* 0 - me + * 1 - pfp + */ +#define PACKET3_INDIRECT_BUFFER 0x3F +#define INDIRECT_BUFFER_TCL2_VOLATILE (1 << 22) +#define INDIRECT_BUFFER_VALID (1 << 23) +#define INDIRECT_BUFFER_CACHE_POLICY(x) ((x) << 28) + /* 0 - LRU + * 1 - Stream + * 2 - Bypass + */ +#define PACKET3_COPY_DATA 0x40 +#define PACKET3_PFP_SYNC_ME 0x42 +#define PACKET3_SURFACE_SYNC 0x43 +# define PACKET3_DEST_BASE_0_ENA (1 << 0) +# define PACKET3_DEST_BASE_1_ENA (1 << 1) +# define PACKET3_CB0_DEST_BASE_ENA (1 << 6) +# define PACKET3_CB1_DEST_BASE_ENA (1 << 7) +# define PACKET3_CB2_DEST_BASE_ENA (1 << 8) +# define PACKET3_CB3_DEST_BASE_ENA (1 << 9) +# define PACKET3_CB4_DEST_BASE_ENA (1 << 10) +# define PACKET3_CB5_DEST_BASE_ENA (1 << 11) +# define PACKET3_CB6_DEST_BASE_ENA (1 << 12) +# define PACKET3_CB7_DEST_BASE_ENA (1 << 13) +# define PACKET3_DB_DEST_BASE_ENA (1 << 14) +# define PACKET3_TCL1_VOL_ACTION_ENA (1 << 15) +# define PACKET3_TC_VOL_ACTION_ENA (1 << 16) /* L2 */ +# define PACKET3_TC_WB_ACTION_ENA (1 << 18) /* L2 */ +# define PACKET3_DEST_BASE_2_ENA (1 << 19) +# define PACKET3_DEST_BASE_3_ENA (1 << 21) +# define PACKET3_TCL1_ACTION_ENA (1 << 22) +# define PACKET3_TC_ACTION_ENA (1 << 23) /* L2 */ +# define PACKET3_CB_ACTION_ENA (1 << 25) +# define PACKET3_DB_ACTION_ENA (1 << 26) +# define PACKET3_SH_KCACHE_ACTION_ENA (1 << 27) +# define PACKET3_SH_KCACHE_VOL_ACTION_ENA (1 << 28) +# define PACKET3_SH_ICACHE_ACTION_ENA (1 << 29) +#define PACKET3_COND_WRITE 0x45 +#define PACKET3_EVENT_WRITE 0x46 +#define EVENT_TYPE(x) ((x) << 0) +#define EVENT_INDEX(x) ((x) << 8) + /* 0 - any non-TS event + * 1 - ZPASS_DONE, PIXEL_PIPE_STAT_* + * 2 - SAMPLE_PIPELINESTAT + * 3 - SAMPLE_STREAMOUTSTAT* + * 4 - *S_PARTIAL_FLUSH + * 5 - EOP events + * 6 - EOS events + */ +#define PACKET3_EVENT_WRITE_EOP 0x47 +#define EOP_TCL1_VOL_ACTION_EN (1 << 12) +#define EOP_TC_VOL_ACTION_EN (1 << 13) /* L2 */ +#define EOP_TC_WB_ACTION_EN (1 << 15) /* L2 */ +#define EOP_TCL1_ACTION_EN (1 << 16) +#define EOP_TC_ACTION_EN (1 << 17) /* L2 */ +#define EOP_TCL2_VOLATILE (1 << 24) +#define EOP_CACHE_POLICY(x) ((x) << 25) + /* 0 - LRU + * 1 - Stream + * 2 - Bypass + */ +#define DATA_SEL(x) 
((x) << 29) + /* 0 - discard + * 1 - send low 32bit data + * 2 - send 64bit data + * 3 - send 64bit GPU counter value + * 4 - send 64bit sys counter value + */ +#define INT_SEL(x) ((x) << 24) + /* 0 - none + * 1 - interrupt only (DATA_SEL = 0) + * 2 - interrupt when data write is confirmed + */ +#define DST_SEL(x) ((x) << 16) + /* 0 - MC + * 1 - TC/L2 + */ +#define PACKET3_EVENT_WRITE_EOS 0x48 +#define PACKET3_RELEASE_MEM 0x49 +#define PACKET3_PREAMBLE_CNTL 0x4A +# define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28) +# define PACKET3_PREAMBLE_END_CLEAR_STATE (3 << 28) +#define PACKET3_DMA_DATA 0x50 +/* 1. header + * 2. CONTROL + * 3. SRC_ADDR_LO or DATA [31:0] + * 4. SRC_ADDR_HI [31:0] + * 5. DST_ADDR_LO [31:0] + * 6. DST_ADDR_HI [7:0] + * 7. COMMAND [30:21] | BYTE_COUNT [20:0] + */ +/* CONTROL */ +# define PACKET3_DMA_DATA_ENGINE(x) ((x) << 0) + /* 0 - ME + * 1 - PFP + */ +# define PACKET3_DMA_DATA_SRC_CACHE_POLICY(x) ((x) << 13) + /* 0 - LRU + * 1 - Stream + * 2 - Bypass + */ +# define PACKET3_DMA_DATA_SRC_VOLATILE (1 << 15) +# define PACKET3_DMA_DATA_DST_SEL(x) ((x) << 20) + /* 0 - DST_ADDR using DAS + * 1 - GDS + * 3 - DST_ADDR using L2 + */ +# define PACKET3_DMA_DATA_DST_CACHE_POLICY(x) ((x) << 25) + /* 0 - LRU + * 1 - Stream + * 2 - Bypass + */ +# define PACKET3_DMA_DATA_DST_VOLATILE (1 << 27) +# define PACKET3_DMA_DATA_SRC_SEL(x) ((x) << 29) + /* 0 - SRC_ADDR using SAS + * 1 - GDS + * 2 - DATA + * 3 - SRC_ADDR using L2 + */ +# define PACKET3_DMA_DATA_CP_SYNC (1 << 31) +/* COMMAND */ +# define PACKET3_DMA_DATA_DIS_WC (1 << 21) +# define PACKET3_DMA_DATA_CMD_SRC_SWAP(x) ((x) << 22) + /* 0 - none + * 1 - 8 in 16 + * 2 - 8 in 32 + * 3 - 8 in 64 + */ +# define PACKET3_DMA_DATA_CMD_DST_SWAP(x) ((x) << 24) + /* 0 - none + * 1 - 8 in 16 + * 2 - 8 in 32 + * 3 - 8 in 64 + */ +# define PACKET3_DMA_DATA_CMD_SAS (1 << 26) + /* 0 - memory + * 1 - register + */ +# define PACKET3_DMA_DATA_CMD_DAS (1 << 27) + /* 0 - memory + * 1 - register + */ +# define PACKET3_DMA_DATA_CMD_SAIC (1 << 28) +# define PACKET3_DMA_DATA_CMD_DAIC (1 << 29) +# define PACKET3_DMA_DATA_CMD_RAW_WAIT (1 << 30) +#define PACKET3_AQUIRE_MEM 0x58 +#define PACKET3_REWIND 0x59 +#define PACKET3_LOAD_UCONFIG_REG 0x5E +#define PACKET3_LOAD_SH_REG 0x5F +#define PACKET3_LOAD_CONFIG_REG 0x60 +#define PACKET3_LOAD_CONTEXT_REG 0x61 +#define PACKET3_SET_CONFIG_REG 0x68 +#define PACKET3_SET_CONFIG_REG_START 0x00002000 +#define PACKET3_SET_CONFIG_REG_END 0x00002c00 +#define PACKET3_SET_CONTEXT_REG 0x69 +#define PACKET3_SET_CONTEXT_REG_START 0x0000a000 +#define PACKET3_SET_CONTEXT_REG_END 0x0000a400 +#define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73 +#define PACKET3_SET_SH_REG 0x76 +#define PACKET3_SET_SH_REG_START 0x00002c00 +#define PACKET3_SET_SH_REG_END 0x00003000 +#define PACKET3_SET_SH_REG_OFFSET 0x77 +#define PACKET3_SET_QUEUE_REG 0x78 +#define PACKET3_SET_UCONFIG_REG 0x79 +#define PACKET3_SET_UCONFIG_REG_START 0x0000c000 +#define PACKET3_SET_UCONFIG_REG_END 0x0000c400 +#define PACKET3_SCRATCH_RAM_WRITE 0x7D +#define PACKET3_SCRATCH_RAM_READ 0x7E +#define PACKET3_LOAD_CONST_RAM 0x80 +#define PACKET3_WRITE_CONST_RAM 0x81 +#define PACKET3_DUMP_CONST_RAM 0x83 +#define PACKET3_INCREMENT_CE_COUNTER 0x84 +#define PACKET3_INCREMENT_DE_COUNTER 0x85 +#define PACKET3_WAIT_ON_CE_COUNTER 0x86 +#define PACKET3_WAIT_ON_DE_COUNTER_DIFF 0x88 +#define PACKET3_SWITCH_BUFFER 0x8B + +#define VCE_CMD_NO_OP 0x00000000 +#define VCE_CMD_END 0x00000001 +#define VCE_CMD_IB 0x00000002 +#define VCE_CMD_FENCE 0x00000003 +#define VCE_CMD_TRAP 0x00000004 +#define 
VCE_CMD_IB_AUTO 0x00000005 +#define VCE_CMD_SEMAPHORE 0x00000006 + +#endif -- cgit v1.2.3 From d94aed5a6c947b1fda346aff1fa316dacf4a1a5a Mon Sep 17 00:00:00 2001 From: Marek Olšák Date: Tue, 5 May 2015 21:13:49 +0200 Subject: drm/amdgpu: add and implement the GPU reset status query MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Marek Olšák Reviewed-by: Christian König Reviewed-by: Jammy Zhou --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 6 ++--- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 36 +++++++++++++++++++----------- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 + include/uapi/drm/amdgpu_drm.h | 11 ++++++++- 4 files changed, 37 insertions(+), 17 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 66b5bd058799..ebff89eb2f4c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1040,7 +1040,7 @@ struct amdgpu_vm_manager { struct amdgpu_ctx_state { uint64_t flags; - uint64_t hangs; + uint32_t hangs; }; struct amdgpu_ctx { @@ -1049,6 +1049,7 @@ struct amdgpu_ctx { struct amdgpu_fpriv *fpriv; struct amdgpu_ctx_state state; uint32_t id; + unsigned reset_counter; }; struct amdgpu_ctx_mgr { @@ -1897,8 +1898,6 @@ int amdgpu_ctx_alloc(struct amdgpu_device *adev,struct amdgpu_fpriv *fpriv, uint32_t *id,uint32_t flags); int amdgpu_ctx_free(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, uint32_t id); -int amdgpu_ctx_query(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, - uint32_t id,struct amdgpu_ctx_state *state); void amdgpu_ctx_fini(struct amdgpu_fpriv *fpriv); struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id); @@ -2006,6 +2005,7 @@ struct amdgpu_device { atomic64_t vram_vis_usage; atomic64_t gtt_usage; atomic64_t num_bytes_moved; + atomic_t gpu_reset_counter; /* display */ struct amdgpu_mode_info mode_info; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index bcd332e085f6..6c66ac8a1891 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -81,21 +81,36 @@ int amdgpu_ctx_free(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, uint return -EINVAL; } -int amdgpu_ctx_query(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, uint32_t id, struct amdgpu_ctx_state *state) +static int amdgpu_ctx_query(struct amdgpu_device *adev, + struct amdgpu_fpriv *fpriv, uint32_t id, + union drm_amdgpu_ctx_out *out) { struct amdgpu_ctx *ctx; struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr; + unsigned reset_counter; mutex_lock(&mgr->lock); ctx = idr_find(&mgr->ctx_handles, id); - if (ctx) { - /* state should alter with CS activity */ - *state = ctx->state; + if (!ctx) { mutex_unlock(&mgr->lock); - return 0; + return -EINVAL; } + + /* TODO: these two are always zero */ + out->state.flags = ctx->state.flags; + out->state.hangs = ctx->state.hangs; + + /* determine if a GPU reset has occured since the last call */ + reset_counter = atomic_read(&adev->gpu_reset_counter); + /* TODO: this should ideally return NO, GUILTY, or INNOCENT. 
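+	 * With only a per-device reset counter we cannot tell which
+	 * context actually caused the hang, so any reset seen since the
+	 * previous query is reported as AMDGPU_CTX_UNKNOWN_RESET.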
*/ + if (ctx->reset_counter == reset_counter) + out->state.reset_status = AMDGPU_CTX_NO_RESET; + else + out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET; + ctx->reset_counter = reset_counter; + mutex_unlock(&mgr->lock); - return -EINVAL; + return 0; } void amdgpu_ctx_fini(struct amdgpu_fpriv *fpriv) @@ -115,12 +130,11 @@ void amdgpu_ctx_fini(struct amdgpu_fpriv *fpriv) } int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, - struct drm_file *filp) + struct drm_file *filp) { int r; uint32_t id; uint32_t flags; - struct amdgpu_ctx_state state; union drm_amdgpu_ctx *args = data; struct amdgpu_device *adev = dev->dev_private; @@ -139,11 +153,7 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, r = amdgpu_ctx_free(adev, fpriv, id); break; case AMDGPU_CTX_OP_QUERY_STATE: - r = amdgpu_ctx_query(adev, fpriv, id, &state); - if (r == 0) { - args->out.state.flags = state.flags; - args->out.state.hangs = state.hangs; - } + r = amdgpu_ctx_query(adev, fpriv, id, &args->out); break; default: return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 61cf5ad78857..3448d9fe88cd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1781,6 +1781,7 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev) } adev->needs_reset = false; + atomic_inc(&adev->gpu_reset_counter); /* block TTM */ resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev); diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 65da7cd16c0f..46580e950036 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -149,6 +149,12 @@ union drm_amdgpu_bo_list { #define AMDGPU_CTX_OP_STATE_RUNNING 1 +/* GPU reset status */ +#define AMDGPU_CTX_NO_RESET 0 +#define AMDGPU_CTX_GUILTY_RESET 1 /* this the context caused it */ +#define AMDGPU_CTX_INNOCENT_RESET 2 /* some other context caused it */ +#define AMDGPU_CTX_UNKNOWN_RESET 3 /* unknown cause */ + struct drm_amdgpu_ctx_in { uint32_t op; uint32_t flags; @@ -164,7 +170,10 @@ union drm_amdgpu_ctx_out { struct { uint64_t flags; - uint64_t hangs; + /** Number of resets caused by this context so far. */ + uint32_t hangs; + /** Reset status since the last call of the ioctl. */ + uint32_t reset_status; } state; }; -- cgit v1.2.3 From 5fc3aeeb9e553a20ce62544f7176c6c4aca52d71 Mon Sep 17 00:00:00 2001 From: yanyang1 Date: Fri, 22 May 2015 14:39:35 -0400 Subject: drm/amdgpu: rename amdgpu_ip_funcs to amd_ip_funcs (v2) The structure is renamed and moved to amd_shared.h to make the component independent. This makes it easier to add new components in the future. 
v2: fix include path Reviewed-by: Jammy Zhou Signed-off-by: yanyang1 Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 3 +- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 75 ++--------------- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 50 +++++------ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 24 +++--- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 2 +- drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 57 ++++++++----- drivers/gpu/drm/amd/amdgpu/cik.c | 130 +++++++++++++++-------------- drivers/gpu/drm/amd/amdgpu/cik.h | 2 +- drivers/gpu/drm/amd/amdgpu/cik_dpm.h | 4 +- drivers/gpu/drm/amd/amdgpu/cik_ih.c | 54 ++++++++---- drivers/gpu/drm/amd/amdgpu/cik_ih.h | 2 +- drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 53 +++++++----- drivers/gpu/drm/amd/amdgpu/cik_sdma.h | 2 +- drivers/gpu/drm/amd/amdgpu/cz_dpm.c | 53 +++++++----- drivers/gpu/drm/amd/amdgpu/cz_ih.c | 53 ++++++++---- drivers/gpu/drm/amd/amdgpu/cz_ih.h | 2 +- drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 50 ++++++----- drivers/gpu/drm/amd/amdgpu/dce_v10_0.h | 2 +- drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 50 ++++++----- drivers/gpu/drm/amd/amdgpu/dce_v11_0.h | 2 +- drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 50 ++++++----- drivers/gpu/drm/amd/amdgpu/dce_v8_0.h | 2 +- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 57 ++++++++----- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h | 2 +- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 51 +++++++---- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h | 2 +- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 52 +++++++----- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h | 2 +- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 51 ++++++----- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h | 2 +- drivers/gpu/drm/amd/amdgpu/iceland_dpm.c | 34 +++++--- drivers/gpu/drm/amd/amdgpu/iceland_ih.c | 55 +++++++----- drivers/gpu/drm/amd/amdgpu/iceland_ih.h | 2 +- drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 89 +++++++++++--------- drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 51 ++++++----- drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h | 2 +- drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 52 +++++++----- drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h | 2 +- drivers/gpu/drm/amd/amdgpu/tonga_dpm.c | 35 +++++--- drivers/gpu/drm/amd/amdgpu/tonga_ih.c | 51 +++++++---- drivers/gpu/drm/amd/amdgpu/tonga_ih.h | 2 +- drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 53 ++++++++---- drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h | 2 +- drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 52 +++++++----- drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h | 2 +- drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 52 +++++++----- drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h | 2 +- drivers/gpu/drm/amd/amdgpu/vce_v2_0.c | 54 +++++++----- drivers/gpu/drm/amd/amdgpu/vce_v2_0.h | 2 +- drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 52 +++++++----- drivers/gpu/drm/amd/amdgpu/vce_v3_0.h | 2 +- drivers/gpu/drm/amd/amdgpu/vi.c | 96 +++++++++++---------- drivers/gpu/drm/amd/amdgpu/vi.h | 2 +- drivers/gpu/drm/amd/amdgpu/vi_dpm.h | 6 +- drivers/gpu/drm/amd/include/amd_shared.h | 81 ++++++++++++++++++ 55 files changed, 1067 insertions(+), 707 deletions(-) create mode 100644 drivers/gpu/drm/amd/include/amd_shared.h (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 9a573e87cdd3..616dfd4a1398 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -2,7 +2,8 @@ # Makefile for the drm device driver. This driver provides support for the # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. 
-ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/amd/include/asic_reg +ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/amd/include/asic_reg \ + -Idrivers/gpu/drm/amd/include amdgpu-y := amdgpu_drv.o diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 411cfb91170f..80f0bea52e33 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -44,6 +44,7 @@ #include +#include "amd_shared.h" #include "amdgpu_family.h" #include "amdgpu_mode.h" #include "amdgpu_ih.h" @@ -205,86 +206,28 @@ enum amdgpu_thermal_irq { AMDGPU_THERMAL_IRQ_LAST }; -/* - * IP block functions - */ -enum amdgpu_ip_block_type { - AMDGPU_IP_BLOCK_TYPE_COMMON, - AMDGPU_IP_BLOCK_TYPE_GMC, - AMDGPU_IP_BLOCK_TYPE_IH, - AMDGPU_IP_BLOCK_TYPE_SMC, - AMDGPU_IP_BLOCK_TYPE_DCE, - AMDGPU_IP_BLOCK_TYPE_GFX, - AMDGPU_IP_BLOCK_TYPE_SDMA, - AMDGPU_IP_BLOCK_TYPE_UVD, - AMDGPU_IP_BLOCK_TYPE_VCE, -}; - -enum amdgpu_clockgating_state { - AMDGPU_CG_STATE_GATE = 0, - AMDGPU_CG_STATE_UNGATE, -}; - -enum amdgpu_powergating_state { - AMDGPU_PG_STATE_GATE = 0, - AMDGPU_PG_STATE_UNGATE, -}; - -struct amdgpu_ip_funcs { - /* sets up early driver state (pre sw_init), does not configure hw - Optional */ - int (*early_init)(struct amdgpu_device *adev); - /* sets up late driver/hw state (post hw_init) - Optional */ - int (*late_init)(struct amdgpu_device *adev); - /* sets up driver state, does not configure hw */ - int (*sw_init)(struct amdgpu_device *adev); - /* tears down driver state, does not configure hw */ - int (*sw_fini)(struct amdgpu_device *adev); - /* sets up the hw state */ - int (*hw_init)(struct amdgpu_device *adev); - /* tears down the hw state */ - int (*hw_fini)(struct amdgpu_device *adev); - /* handles IP specific hw/sw changes for suspend */ - int (*suspend)(struct amdgpu_device *adev); - /* handles IP specific hw/sw changes for resume */ - int (*resume)(struct amdgpu_device *adev); - /* returns current IP block idle status */ - bool (*is_idle)(struct amdgpu_device *adev); - /* poll for idle */ - int (*wait_for_idle)(struct amdgpu_device *adev); - /* soft reset the IP block */ - int (*soft_reset)(struct amdgpu_device *adev); - /* dump the IP block status registers */ - void (*print_status)(struct amdgpu_device *adev); - /* enable/disable cg for the IP block */ - int (*set_clockgating_state)(struct amdgpu_device *adev, - enum amdgpu_clockgating_state state); - /* enable/disable pg for the IP block */ - int (*set_powergating_state)(struct amdgpu_device *adev, - enum amdgpu_powergating_state state); -}; - int amdgpu_set_clockgating_state(struct amdgpu_device *adev, - enum amdgpu_ip_block_type block_type, - enum amdgpu_clockgating_state state); + enum amd_ip_block_type block_type, + enum amd_clockgating_state state); int amdgpu_set_powergating_state(struct amdgpu_device *adev, - enum amdgpu_ip_block_type block_type, - enum amdgpu_powergating_state state); + enum amd_ip_block_type block_type, + enum amd_powergating_state state); struct amdgpu_ip_block_version { - enum amdgpu_ip_block_type type; + enum amd_ip_block_type type; u32 major; u32 minor; u32 rev; - const struct amdgpu_ip_funcs *funcs; + const struct amd_ip_funcs *funcs; }; int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev, - enum amdgpu_ip_block_type type, + enum amd_ip_block_type type, u32 major, u32 minor); const struct amdgpu_ip_block_version * amdgpu_get_ip_block( struct amdgpu_device *adev, - enum amdgpu_ip_block_type type); + enum amd_ip_block_type type); /* provided by hw blocks that can move/clear 
data. e.g., gfx or sdma */ struct amdgpu_buffer_funcs { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 3448d9fe88cd..d9d26f2e7bd7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1081,14 +1081,14 @@ static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = { }; int amdgpu_set_clockgating_state(struct amdgpu_device *adev, - enum amdgpu_ip_block_type block_type, - enum amdgpu_clockgating_state state) + enum amd_ip_block_type block_type, + enum amd_clockgating_state state) { int i, r = 0; for (i = 0; i < adev->num_ip_blocks; i++) { if (adev->ip_blocks[i].type == block_type) { - r = adev->ip_blocks[i].funcs->set_clockgating_state(adev, + r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, state); if (r) return r; @@ -1098,14 +1098,14 @@ int amdgpu_set_clockgating_state(struct amdgpu_device *adev, } int amdgpu_set_powergating_state(struct amdgpu_device *adev, - enum amdgpu_ip_block_type block_type, - enum amdgpu_powergating_state state) + enum amd_ip_block_type block_type, + enum amd_powergating_state state) { int i, r = 0; for (i = 0; i < adev->num_ip_blocks; i++) { if (adev->ip_blocks[i].type == block_type) { - r = adev->ip_blocks[i].funcs->set_powergating_state(adev, + r = adev->ip_blocks[i].funcs->set_powergating_state((void *)adev, state); if (r) return r; @@ -1116,7 +1116,7 @@ int amdgpu_set_powergating_state(struct amdgpu_device *adev, const struct amdgpu_ip_block_version * amdgpu_get_ip_block( struct amdgpu_device *adev, - enum amdgpu_ip_block_type type) + enum amd_ip_block_type type) { int i; @@ -1131,7 +1131,7 @@ const struct amdgpu_ip_block_version * amdgpu_get_ip_block( * amdgpu_ip_block_version_cmp * * @adev: amdgpu_device pointer - * @type: enum amdgpu_ip_block_type + * @type: enum amd_ip_block_type * @major: major version * @minor: minor version * @@ -1139,7 +1139,7 @@ const struct amdgpu_ip_block_version * amdgpu_get_ip_block( * return 1 if smaller or the ip_block doesn't exist */ int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev, - enum amdgpu_ip_block_type type, + enum amd_ip_block_type type, u32 major, u32 minor) { const struct amdgpu_ip_block_version *ip_block; @@ -1204,7 +1204,7 @@ static int amdgpu_early_init(struct amdgpu_device *adev) adev->ip_block_enabled[i] = false; } else { if (adev->ip_blocks[i].funcs->early_init) { - r = adev->ip_blocks[i].funcs->early_init(adev); + r = adev->ip_blocks[i].funcs->early_init((void *)adev); if (r) return r; } @@ -1222,15 +1222,15 @@ static int amdgpu_init(struct amdgpu_device *adev) for (i = 0; i < adev->num_ip_blocks; i++) { if (!adev->ip_block_enabled[i]) continue; - r = adev->ip_blocks[i].funcs->sw_init(adev); + r = adev->ip_blocks[i].funcs->sw_init((void *)adev); if (r) return r; /* need to do gmc hw init early so we can allocate gpu mem */ - if (adev->ip_blocks[i].type == AMDGPU_IP_BLOCK_TYPE_GMC) { + if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) { r = amdgpu_vram_scratch_init(adev); if (r) return r; - r = adev->ip_blocks[i].funcs->hw_init(adev); + r = adev->ip_blocks[i].funcs->hw_init((void *)adev); if (r) return r; r = amdgpu_wb_init(adev); @@ -1243,9 +1243,9 @@ static int amdgpu_init(struct amdgpu_device *adev) if (!adev->ip_block_enabled[i]) continue; /* gmc hw init is done early */ - if (adev->ip_blocks[i].type == AMDGPU_IP_BLOCK_TYPE_GMC) + if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) continue; - r = adev->ip_blocks[i].funcs->hw_init(adev); + r = 
adev->ip_blocks[i].funcs->hw_init((void *)adev); if (r) return r; } @@ -1261,12 +1261,12 @@ static int amdgpu_late_init(struct amdgpu_device *adev) if (!adev->ip_block_enabled[i]) continue; /* enable clockgating to save power */ - r = adev->ip_blocks[i].funcs->set_clockgating_state(adev, - AMDGPU_CG_STATE_GATE); + r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, + AMD_CG_STATE_GATE); if (r) return r; if (adev->ip_blocks[i].funcs->late_init) { - r = adev->ip_blocks[i].funcs->late_init(adev); + r = adev->ip_blocks[i].funcs->late_init((void *)adev); if (r) return r; } @@ -1282,23 +1282,23 @@ static int amdgpu_fini(struct amdgpu_device *adev) for (i = adev->num_ip_blocks - 1; i >= 0; i--) { if (!adev->ip_block_enabled[i]) continue; - if (adev->ip_blocks[i].type == AMDGPU_IP_BLOCK_TYPE_GMC) { + if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) { amdgpu_wb_fini(adev); amdgpu_vram_scratch_fini(adev); } /* ungate blocks before hw fini so that we can shutdown the blocks safely */ - r = adev->ip_blocks[i].funcs->set_clockgating_state(adev, - AMDGPU_CG_STATE_UNGATE); + r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, + AMD_CG_STATE_UNGATE); if (r) return r; - r = adev->ip_blocks[i].funcs->hw_fini(adev); + r = adev->ip_blocks[i].funcs->hw_fini((void *)adev); /* XXX handle errors */ } for (i = adev->num_ip_blocks - 1; i >= 0; i--) { if (!adev->ip_block_enabled[i]) continue; - r = adev->ip_blocks[i].funcs->sw_fini(adev); + r = adev->ip_blocks[i].funcs->sw_fini((void *)adev); /* XXX handle errors */ adev->ip_block_enabled[i] = false; } @@ -1314,8 +1314,8 @@ static int amdgpu_suspend(struct amdgpu_device *adev) if (!adev->ip_block_enabled[i]) continue; /* ungate blocks so that suspend can properly shut them down */ - r = adev->ip_blocks[i].funcs->set_clockgating_state(adev, - AMDGPU_CG_STATE_UNGATE); + r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, + AMD_CG_STATE_UNGATE); /* XXX handle errors */ r = adev->ip_blocks[i].funcs->suspend(adev); /* XXX handle errors */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 35185d6b7d46..b6dd3751d9a5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -186,7 +186,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0; case AMDGPU_INFO_HW_IP_INFO: { struct drm_amdgpu_info_hw_ip ip = {}; - enum amdgpu_ip_block_type type; + enum amd_ip_block_type type; uint32_t ring_mask = 0; if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT) @@ -194,26 +194,26 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file switch (info->query_hw_ip.type) { case AMDGPU_HW_IP_GFX: - type = AMDGPU_IP_BLOCK_TYPE_GFX; + type = AMD_IP_BLOCK_TYPE_GFX; for (i = 0; i < adev->gfx.num_gfx_rings; i++) ring_mask |= ((adev->gfx.gfx_ring[i].ready ? 1 : 0) << i); break; case AMDGPU_HW_IP_COMPUTE: - type = AMDGPU_IP_BLOCK_TYPE_GFX; + type = AMD_IP_BLOCK_TYPE_GFX; for (i = 0; i < adev->gfx.num_compute_rings; i++) ring_mask |= ((adev->gfx.compute_ring[i].ready ? 1 : 0) << i); break; case AMDGPU_HW_IP_DMA: - type = AMDGPU_IP_BLOCK_TYPE_SDMA; + type = AMD_IP_BLOCK_TYPE_SDMA; ring_mask = adev->sdma[0].ring.ready ? 1 : 0; ring_mask |= ((adev->sdma[1].ring.ready ? 1 : 0) << 1); break; case AMDGPU_HW_IP_UVD: - type = AMDGPU_IP_BLOCK_TYPE_UVD; + type = AMD_IP_BLOCK_TYPE_UVD; ring_mask = adev->uvd.ring.ready ? 
1 : 0; break; case AMDGPU_HW_IP_VCE: - type = AMDGPU_IP_BLOCK_TYPE_VCE; + type = AMD_IP_BLOCK_TYPE_VCE; for (i = 0; i < AMDGPU_MAX_VCE_RINGS; i++) ring_mask |= ((adev->vce.ring[i].ready ? 1 : 0) << i); break; @@ -235,24 +235,24 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file min((size_t)size, sizeof(ip))) ? -EFAULT : 0; } case AMDGPU_INFO_HW_IP_COUNT: { - enum amdgpu_ip_block_type type; + enum amd_ip_block_type type; uint32_t count = 0; switch (info->query_hw_ip.type) { case AMDGPU_HW_IP_GFX: - type = AMDGPU_IP_BLOCK_TYPE_GFX; + type = AMD_IP_BLOCK_TYPE_GFX; break; case AMDGPU_HW_IP_COMPUTE: - type = AMDGPU_IP_BLOCK_TYPE_GFX; + type = AMD_IP_BLOCK_TYPE_GFX; break; case AMDGPU_HW_IP_DMA: - type = AMDGPU_IP_BLOCK_TYPE_SDMA; + type = AMD_IP_BLOCK_TYPE_SDMA; break; case AMDGPU_HW_IP_UVD: - type = AMDGPU_IP_BLOCK_TYPE_UVD; + type = AMD_IP_BLOCK_TYPE_UVD; break; case AMDGPU_HW_IP_VCE: - type = AMDGPU_IP_BLOCK_TYPE_VCE; + type = AMD_IP_BLOCK_TYPE_VCE; break; default: return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 4b0be906d235..375759c8639c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -185,7 +185,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) } /* from uvd v5.0 HW addressing capacity increased to 64 bits */ - if (!amdgpu_ip_block_version_cmp(adev, AMDGPU_IP_BLOCK_TYPE_UVD, 5, 0)) + if (!amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0)) adev->uvd.address_64_bit = true; return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index 5a9dad8e55c9..03809f1bcfd5 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c @@ -4227,8 +4227,8 @@ static int ci_update_vce_dpm(struct amdgpu_device *adev, if (amdgpu_current_state->evclk != amdgpu_new_state->evclk) { if (amdgpu_new_state->evclk) { /* turn the clocks on when encoding */ - ret = amdgpu_set_clockgating_state(adev, AMDGPU_IP_BLOCK_TYPE_VCE, - AMDGPU_CG_STATE_UNGATE); + ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_CG_STATE_UNGATE); if (ret) return ret; @@ -4241,8 +4241,8 @@ static int ci_update_vce_dpm(struct amdgpu_device *adev, ret = ci_enable_vce_dpm(adev, true); } else { /* turn the clocks off when not encoding */ - ret = amdgpu_set_clockgating_state(adev, AMDGPU_IP_BLOCK_TYPE_VCE, - AMDGPU_CG_STATE_GATE); + ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_CG_STATE_GATE); if (ret) return ret; @@ -6171,17 +6171,20 @@ static int ci_set_temperature_range(struct amdgpu_device *adev) return ret; } -static int ci_dpm_early_init(struct amdgpu_device *adev) +static int ci_dpm_early_init(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + ci_dpm_set_dpm_funcs(adev); ci_dpm_set_irq_funcs(adev); return 0; } -static int ci_dpm_late_init(struct amdgpu_device *adev) +static int ci_dpm_late_init(void *handle) { int ret; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; if (!amdgpu_dpm) return 0; @@ -6195,9 +6198,10 @@ static int ci_dpm_late_init(struct amdgpu_device *adev) return 0; } -static int ci_dpm_sw_init(struct amdgpu_device *adev) +static int ci_dpm_sw_init(void *handle) { int ret; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; ret = amdgpu_irq_add_id(adev, 230, &adev->pm.dpm.thermal.irq); if (ret) @@ -6243,8 +6247,10 @@ dpm_failed: return ret; } -static int ci_dpm_sw_fini(struct amdgpu_device 
*adev) +static int ci_dpm_sw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + mutex_lock(&adev->pm.mutex); amdgpu_pm_sysfs_fini(adev); ci_dpm_fini(adev); @@ -6253,10 +6259,12 @@ static int ci_dpm_sw_fini(struct amdgpu_device *adev) return 0; } -static int ci_dpm_hw_init(struct amdgpu_device *adev) +static int ci_dpm_hw_init(void *handle) { int ret; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (!amdgpu_dpm) return 0; @@ -6272,8 +6280,10 @@ static int ci_dpm_hw_init(struct amdgpu_device *adev) return ret; } -static int ci_dpm_hw_fini(struct amdgpu_device *adev) +static int ci_dpm_hw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (adev->pm.dpm_enabled) { mutex_lock(&adev->pm.mutex); ci_dpm_disable(adev); @@ -6283,8 +6293,10 @@ static int ci_dpm_hw_fini(struct amdgpu_device *adev) return 0; } -static int ci_dpm_suspend(struct amdgpu_device *adev) +static int ci_dpm_suspend(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (adev->pm.dpm_enabled) { mutex_lock(&adev->pm.mutex); /* disable dpm */ @@ -6296,9 +6308,10 @@ static int ci_dpm_suspend(struct amdgpu_device *adev) return 0; } -static int ci_dpm_resume(struct amdgpu_device *adev) +static int ci_dpm_resume(void *handle) { int ret; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; if (adev->pm.dpm_enabled) { /* asic init will reset to the boot state */ @@ -6316,20 +6329,22 @@ static int ci_dpm_resume(struct amdgpu_device *adev) return 0; } -static bool ci_dpm_is_idle(struct amdgpu_device *adev) +static bool ci_dpm_is_idle(void *handle) { /* XXX */ return true; } -static int ci_dpm_wait_for_idle(struct amdgpu_device *adev) +static int ci_dpm_wait_for_idle(void *handle) { /* XXX */ return 0; } -static void ci_dpm_print_status(struct amdgpu_device *adev) +static void ci_dpm_print_status(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + dev_info(adev->dev, "CIK DPM registers\n"); dev_info(adev->dev, " BIOS_SCRATCH_4=0x%08X\n", RREG32(mmBIOS_SCRATCH_4)); @@ -6535,7 +6550,7 @@ static void ci_dpm_print_status(struct amdgpu_device *adev) RREG32_SMC(ixSMC_PC_C)); } -static int ci_dpm_soft_reset(struct amdgpu_device *adev) +static int ci_dpm_soft_reset(void *handle) { return 0; } @@ -6618,19 +6633,19 @@ static int ci_dpm_process_interrupt(struct amdgpu_device *adev, return 0; } -static int ci_dpm_set_clockgating_state(struct amdgpu_device *adev, - enum amdgpu_clockgating_state state) +static int ci_dpm_set_clockgating_state(void *handle, + enum amd_clockgating_state state) { return 0; } -static int ci_dpm_set_powergating_state(struct amdgpu_device *adev, - enum amdgpu_powergating_state state) +static int ci_dpm_set_powergating_state(void *handle, + enum amd_powergating_state state) { return 0; } -const struct amdgpu_ip_funcs ci_dpm_ip_funcs = { +const struct amd_ip_funcs ci_dpm_ip_funcs = { .early_init = ci_dpm_early_init, .late_init = ci_dpm_late_init, .sw_init = ci_dpm_sw_init, diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index 74ce0be2fbb7..5dab578d6462 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -1892,63 +1892,63 @@ static const struct amdgpu_ip_block_version bonaire_ip_blocks[] = { /* ORDER MATTERS! 
*/ { - .type = AMDGPU_IP_BLOCK_TYPE_COMMON, + .type = AMD_IP_BLOCK_TYPE_COMMON, .major = 1, .minor = 0, .rev = 0, .funcs = &cik_common_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_GMC, + .type = AMD_IP_BLOCK_TYPE_GMC, .major = 7, .minor = 0, .rev = 0, .funcs = &gmc_v7_0_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_IH, + .type = AMD_IP_BLOCK_TYPE_IH, .major = 2, .minor = 0, .rev = 0, .funcs = &cik_ih_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_SMC, + .type = AMD_IP_BLOCK_TYPE_SMC, .major = 7, .minor = 0, .rev = 0, .funcs = &ci_dpm_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_DCE, + .type = AMD_IP_BLOCK_TYPE_DCE, .major = 8, .minor = 2, .rev = 0, .funcs = &dce_v8_0_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_GFX, + .type = AMD_IP_BLOCK_TYPE_GFX, .major = 7, .minor = 2, .rev = 0, .funcs = &gfx_v7_0_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_SDMA, + .type = AMD_IP_BLOCK_TYPE_SDMA, .major = 2, .minor = 0, .rev = 0, .funcs = &cik_sdma_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_UVD, + .type = AMD_IP_BLOCK_TYPE_UVD, .major = 4, .minor = 2, .rev = 0, .funcs = &uvd_v4_2_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_VCE, + .type = AMD_IP_BLOCK_TYPE_VCE, .major = 2, .minor = 0, .rev = 0, @@ -1960,63 +1960,63 @@ static const struct amdgpu_ip_block_version hawaii_ip_blocks[] = { /* ORDER MATTERS! */ { - .type = AMDGPU_IP_BLOCK_TYPE_COMMON, + .type = AMD_IP_BLOCK_TYPE_COMMON, .major = 1, .minor = 0, .rev = 0, .funcs = &cik_common_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_GMC, + .type = AMD_IP_BLOCK_TYPE_GMC, .major = 7, .minor = 0, .rev = 0, .funcs = &gmc_v7_0_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_IH, + .type = AMD_IP_BLOCK_TYPE_IH, .major = 2, .minor = 0, .rev = 0, .funcs = &cik_ih_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_SMC, + .type = AMD_IP_BLOCK_TYPE_SMC, .major = 7, .minor = 0, .rev = 0, .funcs = &ci_dpm_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_DCE, + .type = AMD_IP_BLOCK_TYPE_DCE, .major = 8, .minor = 5, .rev = 0, .funcs = &dce_v8_0_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_GFX, + .type = AMD_IP_BLOCK_TYPE_GFX, .major = 7, .minor = 3, .rev = 0, .funcs = &gfx_v7_0_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_SDMA, + .type = AMD_IP_BLOCK_TYPE_SDMA, .major = 2, .minor = 0, .rev = 0, .funcs = &cik_sdma_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_UVD, + .type = AMD_IP_BLOCK_TYPE_UVD, .major = 4, .minor = 2, .rev = 0, .funcs = &uvd_v4_2_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_VCE, + .type = AMD_IP_BLOCK_TYPE_VCE, .major = 2, .minor = 0, .rev = 0, @@ -2028,63 +2028,63 @@ static const struct amdgpu_ip_block_version kabini_ip_blocks[] = { /* ORDER MATTERS! 
*/ { - .type = AMDGPU_IP_BLOCK_TYPE_COMMON, + .type = AMD_IP_BLOCK_TYPE_COMMON, .major = 1, .minor = 0, .rev = 0, .funcs = &cik_common_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_GMC, + .type = AMD_IP_BLOCK_TYPE_GMC, .major = 7, .minor = 0, .rev = 0, .funcs = &gmc_v7_0_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_IH, + .type = AMD_IP_BLOCK_TYPE_IH, .major = 2, .minor = 0, .rev = 0, .funcs = &cik_ih_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_SMC, + .type = AMD_IP_BLOCK_TYPE_SMC, .major = 7, .minor = 0, .rev = 0, .funcs = &kv_dpm_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_DCE, + .type = AMD_IP_BLOCK_TYPE_DCE, .major = 8, .minor = 3, .rev = 0, .funcs = &dce_v8_0_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_GFX, + .type = AMD_IP_BLOCK_TYPE_GFX, .major = 7, .minor = 2, .rev = 0, .funcs = &gfx_v7_0_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_SDMA, + .type = AMD_IP_BLOCK_TYPE_SDMA, .major = 2, .minor = 0, .rev = 0, .funcs = &cik_sdma_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_UVD, + .type = AMD_IP_BLOCK_TYPE_UVD, .major = 4, .minor = 2, .rev = 0, .funcs = &uvd_v4_2_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_VCE, + .type = AMD_IP_BLOCK_TYPE_VCE, .major = 2, .minor = 0, .rev = 0, @@ -2096,63 +2096,63 @@ static const struct amdgpu_ip_block_version mullins_ip_blocks[] = { /* ORDER MATTERS! */ { - .type = AMDGPU_IP_BLOCK_TYPE_COMMON, + .type = AMD_IP_BLOCK_TYPE_COMMON, .major = 1, .minor = 0, .rev = 0, .funcs = &cik_common_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_GMC, + .type = AMD_IP_BLOCK_TYPE_GMC, .major = 7, .minor = 0, .rev = 0, .funcs = &gmc_v7_0_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_IH, + .type = AMD_IP_BLOCK_TYPE_IH, .major = 2, .minor = 0, .rev = 0, .funcs = &cik_ih_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_SMC, + .type = AMD_IP_BLOCK_TYPE_SMC, .major = 7, .minor = 0, .rev = 0, .funcs = &kv_dpm_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_DCE, + .type = AMD_IP_BLOCK_TYPE_DCE, .major = 8, .minor = 3, .rev = 0, .funcs = &dce_v8_0_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_GFX, + .type = AMD_IP_BLOCK_TYPE_GFX, .major = 7, .minor = 2, .rev = 0, .funcs = &gfx_v7_0_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_SDMA, + .type = AMD_IP_BLOCK_TYPE_SDMA, .major = 2, .minor = 0, .rev = 0, .funcs = &cik_sdma_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_UVD, + .type = AMD_IP_BLOCK_TYPE_UVD, .major = 4, .minor = 2, .rev = 0, .funcs = &uvd_v4_2_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_VCE, + .type = AMD_IP_BLOCK_TYPE_VCE, .major = 2, .minor = 0, .rev = 0, @@ -2164,63 +2164,63 @@ static const struct amdgpu_ip_block_version kaveri_ip_blocks[] = { /* ORDER MATTERS! 
*/ { - .type = AMDGPU_IP_BLOCK_TYPE_COMMON, + .type = AMD_IP_BLOCK_TYPE_COMMON, .major = 1, .minor = 0, .rev = 0, .funcs = &cik_common_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_GMC, + .type = AMD_IP_BLOCK_TYPE_GMC, .major = 7, .minor = 0, .rev = 0, .funcs = &gmc_v7_0_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_IH, + .type = AMD_IP_BLOCK_TYPE_IH, .major = 2, .minor = 0, .rev = 0, .funcs = &cik_ih_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_SMC, + .type = AMD_IP_BLOCK_TYPE_SMC, .major = 7, .minor = 0, .rev = 0, .funcs = &kv_dpm_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_DCE, + .type = AMD_IP_BLOCK_TYPE_DCE, .major = 8, .minor = 1, .rev = 0, .funcs = &dce_v8_0_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_GFX, + .type = AMD_IP_BLOCK_TYPE_GFX, .major = 7, .minor = 1, .rev = 0, .funcs = &gfx_v7_0_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_SDMA, + .type = AMD_IP_BLOCK_TYPE_SDMA, .major = 2, .minor = 0, .rev = 0, .funcs = &cik_sdma_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_UVD, + .type = AMD_IP_BLOCK_TYPE_UVD, .major = 4, .minor = 2, .rev = 0, .funcs = &uvd_v4_2_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_VCE, + .type = AMD_IP_BLOCK_TYPE_VCE, .major = 2, .minor = 0, .rev = 0, @@ -2278,8 +2278,10 @@ static const struct amdgpu_asic_funcs cik_asic_funcs = .wait_for_mc_idle = &gmc_v7_0_mc_wait_for_idle, }; -static int cik_common_early_init(struct amdgpu_device *adev) +static int cik_common_early_init(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + adev->smc_rreg = &cik_smc_rreg; adev->smc_wreg = &cik_smc_wreg; adev->pcie_rreg = &cik_pcie_rreg; @@ -2417,18 +2419,20 @@ static int cik_common_early_init(struct amdgpu_device *adev) return 0; } -static int cik_common_sw_init(struct amdgpu_device *adev) +static int cik_common_sw_init(void *handle) { return 0; } -static int cik_common_sw_fini(struct amdgpu_device *adev) +static int cik_common_sw_fini(void *handle) { return 0; } -static int cik_common_hw_init(struct amdgpu_device *adev) +static int cik_common_hw_init(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* move the golden regs per IP block */ cik_init_golden_registers(adev); /* enable pcie gen2/3 link */ @@ -2439,55 +2443,59 @@ static int cik_common_hw_init(struct amdgpu_device *adev) return 0; } -static int cik_common_hw_fini(struct amdgpu_device *adev) +static int cik_common_hw_fini(void *handle) { return 0; } -static int cik_common_suspend(struct amdgpu_device *adev) +static int cik_common_suspend(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + return cik_common_hw_fini(adev); } -static int cik_common_resume(struct amdgpu_device *adev) +static int cik_common_resume(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + return cik_common_hw_init(adev); } -static bool cik_common_is_idle(struct amdgpu_device *adev) +static bool cik_common_is_idle(void *handle) { return true; } -static int cik_common_wait_for_idle(struct amdgpu_device *adev) +static int cik_common_wait_for_idle(void *handle) { return 0; } -static void cik_common_print_status(struct amdgpu_device *adev) +static void cik_common_print_status(void *handle) { } -static int cik_common_soft_reset(struct amdgpu_device *adev) +static int cik_common_soft_reset(void *handle) { /* XXX hard reset?? 
*/ return 0; } -static int cik_common_set_clockgating_state(struct amdgpu_device *adev, - enum amdgpu_clockgating_state state) +static int cik_common_set_clockgating_state(void *handle, + enum amd_clockgating_state state) { return 0; } -static int cik_common_set_powergating_state(struct amdgpu_device *adev, - enum amdgpu_powergating_state state) +static int cik_common_set_powergating_state(void *handle, + enum amd_powergating_state state) { return 0; } -const struct amdgpu_ip_funcs cik_common_ip_funcs = { +const struct amd_ip_funcs cik_common_ip_funcs = { .early_init = cik_common_early_init, .late_init = NULL, .sw_init = cik_common_sw_init, diff --git a/drivers/gpu/drm/amd/amdgpu/cik.h b/drivers/gpu/drm/amd/amdgpu/cik.h index 967d630a4dcb..5ebd2d7a0327 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.h +++ b/drivers/gpu/drm/amd/amdgpu/cik.h @@ -24,7 +24,7 @@ #ifndef __CIK_H__ #define __CIK_H__ -extern const struct amdgpu_ip_funcs cik_common_ip_funcs; +extern const struct amd_ip_funcs cik_common_ip_funcs; void cik_srbm_select(struct amdgpu_device *adev, u32 me, u32 pipe, u32 queue, u32 vmid); diff --git a/drivers/gpu/drm/amd/amdgpu/cik_dpm.h b/drivers/gpu/drm/amd/amdgpu/cik_dpm.h index 35d8efdcde7e..b1c8e7b446ea 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_dpm.h +++ b/drivers/gpu/drm/amd/amdgpu/cik_dpm.h @@ -24,7 +24,7 @@ #ifndef __CIK_DPM_H__ #define __CIK_DPM_H__ -extern const struct amdgpu_ip_funcs ci_dpm_ip_funcs; -extern const struct amdgpu_ip_funcs kv_dpm_ip_funcs; +extern const struct amd_ip_funcs ci_dpm_ip_funcs; +extern const struct amd_ip_funcs kv_dpm_ip_funcs; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c index 81e8bbaba3e8..8993c50cb89f 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c @@ -271,16 +271,19 @@ static void cik_ih_set_rptr(struct amdgpu_device *adev) WREG32(mmIH_RB_RPTR, adev->irq.ih.rptr); } -static int cik_ih_early_init(struct amdgpu_device *adev) +static int cik_ih_early_init(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + cik_ih_set_interrupt_funcs(adev); return 0; } -static int cik_ih_sw_init(struct amdgpu_device *adev) +static int cik_ih_sw_init(void *handle) { int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; r = amdgpu_ih_ring_init(adev, 64 * 1024, false); if (r) @@ -291,17 +294,20 @@ static int cik_ih_sw_init(struct amdgpu_device *adev) return r; } -static int cik_ih_sw_fini(struct amdgpu_device *adev) +static int cik_ih_sw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + amdgpu_irq_fini(adev); amdgpu_ih_ring_fini(adev); return 0; } -static int cik_ih_hw_init(struct amdgpu_device *adev) +static int cik_ih_hw_init(void *handle) { int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; r = cik_ih_irq_init(adev); if (r) @@ -310,25 +316,32 @@ static int cik_ih_hw_init(struct amdgpu_device *adev) return 0; } -static int cik_ih_hw_fini(struct amdgpu_device *adev) +static int cik_ih_hw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + cik_ih_irq_disable(adev); return 0; } -static int cik_ih_suspend(struct amdgpu_device *adev) +static int cik_ih_suspend(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + return cik_ih_hw_fini(adev); } -static int cik_ih_resume(struct amdgpu_device *adev) +static int cik_ih_resume(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + return 
cik_ih_hw_init(adev); } -static bool cik_ih_is_idle(struct amdgpu_device *adev) +static bool cik_ih_is_idle(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 tmp = RREG32(mmSRBM_STATUS); if (tmp & SRBM_STATUS__IH_BUSY_MASK) @@ -337,10 +350,11 @@ static bool cik_ih_is_idle(struct amdgpu_device *adev) return true; } -static int cik_ih_wait_for_idle(struct amdgpu_device *adev) +static int cik_ih_wait_for_idle(void *handle) { unsigned i; u32 tmp; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; for (i = 0; i < adev->usec_timeout; i++) { /* read MC_STATUS */ @@ -352,8 +366,10 @@ static int cik_ih_wait_for_idle(struct amdgpu_device *adev) return -ETIMEDOUT; } -static void cik_ih_print_status(struct amdgpu_device *adev) +static void cik_ih_print_status(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + dev_info(adev->dev, "CIK IH registers\n"); dev_info(adev->dev, " SRBM_STATUS=0x%08X\n", RREG32(mmSRBM_STATUS)); @@ -379,8 +395,10 @@ static void cik_ih_print_status(struct amdgpu_device *adev) RREG32(mmIH_RB_WPTR)); } -static int cik_ih_soft_reset(struct amdgpu_device *adev) +static int cik_ih_soft_reset(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 srbm_soft_reset = 0; u32 tmp = RREG32(mmSRBM_STATUS); @@ -388,7 +406,7 @@ static int cik_ih_soft_reset(struct amdgpu_device *adev) srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK; if (srbm_soft_reset) { - cik_ih_print_status(adev); + cik_ih_print_status((void *)adev); tmp = RREG32(mmSRBM_SOFT_RESET); tmp |= srbm_soft_reset; @@ -405,25 +423,25 @@ static int cik_ih_soft_reset(struct amdgpu_device *adev) /* Wait a little for things to settle down */ udelay(50); - cik_ih_print_status(adev); + cik_ih_print_status((void *)adev); } return 0; } -static int cik_ih_set_clockgating_state(struct amdgpu_device *adev, - enum amdgpu_clockgating_state state) +static int cik_ih_set_clockgating_state(void *handle, + enum amd_clockgating_state state) { return 0; } -static int cik_ih_set_powergating_state(struct amdgpu_device *adev, - enum amdgpu_powergating_state state) +static int cik_ih_set_powergating_state(void *handle, + enum amd_powergating_state state) { return 0; } -const struct amdgpu_ip_funcs cik_ih_ip_funcs = { +const struct amd_ip_funcs cik_ih_ip_funcs = { .early_init = cik_ih_early_init, .late_init = NULL, .sw_init = cik_ih_sw_init, diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.h b/drivers/gpu/drm/amd/amdgpu/cik_ih.h index f70162525034..6b0f375ec244 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_ih.h +++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.h @@ -24,6 +24,6 @@ #ifndef __CIK_IH_H__ #define __CIK_IH_H__ -extern const struct amdgpu_ip_funcs cik_ih_ip_funcs; +extern const struct amd_ip_funcs cik_ih_ip_funcs; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index d5055ea4d112..7c816b5cf17a 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -927,8 +927,10 @@ static void cik_enable_sdma_mgls(struct amdgpu_device *adev, } } -static int cik_sdma_early_init(struct amdgpu_device *adev) +static int cik_sdma_early_init(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + cik_sdma_set_ring_funcs(adev); cik_sdma_set_irq_funcs(adev); cik_sdma_set_buffer_funcs(adev); @@ -937,9 +939,10 @@ static int cik_sdma_early_init(struct amdgpu_device *adev) return 0; } -static int cik_sdma_sw_init(struct amdgpu_device *adev) +static int 
cik_sdma_sw_init(void *handle) { struct amdgpu_ring *ring; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; r = cik_sdma_init_microcode(adev); @@ -990,17 +993,20 @@ static int cik_sdma_sw_init(struct amdgpu_device *adev) return r; } -static int cik_sdma_sw_fini(struct amdgpu_device *adev) +static int cik_sdma_sw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + amdgpu_ring_fini(&adev->sdma[0].ring); amdgpu_ring_fini(&adev->sdma[1].ring); return 0; } -static int cik_sdma_hw_init(struct amdgpu_device *adev) +static int cik_sdma_hw_init(void *handle) { int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; r = cik_sdma_start(adev); if (r) @@ -1009,27 +1015,32 @@ static int cik_sdma_hw_init(struct amdgpu_device *adev) return r; } -static int cik_sdma_hw_fini(struct amdgpu_device *adev) +static int cik_sdma_hw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + cik_sdma_enable(adev, false); return 0; } -static int cik_sdma_suspend(struct amdgpu_device *adev) +static int cik_sdma_suspend(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; return cik_sdma_hw_fini(adev); } -static int cik_sdma_resume(struct amdgpu_device *adev) +static int cik_sdma_resume(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; return cik_sdma_hw_init(adev); } -static bool cik_sdma_is_idle(struct amdgpu_device *adev) +static bool cik_sdma_is_idle(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 tmp = RREG32(mmSRBM_STATUS2); if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK | @@ -1039,10 +1050,11 @@ static bool cik_sdma_is_idle(struct amdgpu_device *adev) return true; } -static int cik_sdma_wait_for_idle(struct amdgpu_device *adev) +static int cik_sdma_wait_for_idle(void *handle) { unsigned i; u32 tmp; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; for (i = 0; i < adev->usec_timeout; i++) { tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK | @@ -1055,9 +1067,10 @@ static int cik_sdma_wait_for_idle(struct amdgpu_device *adev) return -ETIMEDOUT; } -static void cik_sdma_print_status(struct amdgpu_device *adev) +static void cik_sdma_print_status(void *handle) { int i, j; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; dev_info(adev->dev, "CIK SDMA registers\n"); dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", @@ -1103,9 +1116,10 @@ static void cik_sdma_print_status(struct amdgpu_device *adev) } } -static int cik_sdma_soft_reset(struct amdgpu_device *adev) +static int cik_sdma_soft_reset(void *handle) { u32 srbm_soft_reset = 0; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 tmp = RREG32(mmSRBM_STATUS2); if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) { @@ -1124,7 +1138,7 @@ static int cik_sdma_soft_reset(struct amdgpu_device *adev) } if (srbm_soft_reset) { - cik_sdma_print_status(adev); + cik_sdma_print_status((void *)adev); tmp = RREG32(mmSRBM_SOFT_RESET); tmp |= srbm_soft_reset; @@ -1141,7 +1155,7 @@ static int cik_sdma_soft_reset(struct amdgpu_device *adev) /* Wait a little for things to settle down */ udelay(50); - cik_sdma_print_status(adev); + cik_sdma_print_status((void *)adev); } return 0; @@ -1243,12 +1257,13 @@ static int cik_sdma_process_illegal_inst_irq(struct amdgpu_device *adev, return 0; } -static int cik_sdma_set_clockgating_state(struct amdgpu_device *adev, - enum amdgpu_clockgating_state state) +static int cik_sdma_set_clockgating_state(void *handle, + enum 
amd_clockgating_state state) { bool gate = false; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (state == AMDGPU_CG_STATE_GATE) + if (state == AMD_CG_STATE_GATE) gate = true; cik_enable_sdma_mgcg(adev, gate); @@ -1257,13 +1272,13 @@ static int cik_sdma_set_clockgating_state(struct amdgpu_device *adev, return 0; } -static int cik_sdma_set_powergating_state(struct amdgpu_device *adev, - enum amdgpu_powergating_state state) +static int cik_sdma_set_powergating_state(void *handle, + enum amd_powergating_state state) { return 0; } -const struct amdgpu_ip_funcs cik_sdma_ip_funcs = { +const struct amd_ip_funcs cik_sdma_ip_funcs = { .early_init = cik_sdma_early_init, .late_init = NULL, .sw_init = cik_sdma_sw_init, diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.h b/drivers/gpu/drm/amd/amdgpu/cik_sdma.h index 42b59960bc53..027727c677b8 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.h +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.h @@ -24,6 +24,6 @@ #ifndef __CIK_SDMA_H__ #define __CIK_SDMA_H__ -extern const struct amdgpu_ip_funcs cik_sdma_ip_funcs; +extern const struct amd_ip_funcs cik_sdma_ip_funcs; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c index 2649b505d2d0..f275b5d2d060 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c @@ -542,24 +542,29 @@ static void cz_dpm_print_power_state(struct amdgpu_device *adev, static void cz_dpm_set_funcs(struct amdgpu_device *adev); -static int cz_dpm_early_init(struct amdgpu_device *adev) +static int cz_dpm_early_init(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + cz_dpm_set_funcs(adev); return 0; } -static int cz_dpm_late_init(struct amdgpu_device *adev) +static int cz_dpm_late_init(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* powerdown unused blocks for now */ cz_dpm_powergate_uvd(adev, true); return 0; } -static int cz_dpm_sw_init(struct amdgpu_device *adev) +static int cz_dpm_sw_init(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; int ret = 0; /* fix me to add thermal support TODO */ @@ -602,8 +607,10 @@ dpm_init_failed: return ret; } -static int cz_dpm_sw_fini(struct amdgpu_device *adev) +static int cz_dpm_sw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + mutex_lock(&adev->pm.mutex); amdgpu_pm_sysfs_fini(adev); cz_dpm_fini(adev); @@ -1216,8 +1223,9 @@ static int cz_dpm_enable(struct amdgpu_device *adev) return 0; } -static int cz_dpm_hw_init(struct amdgpu_device *adev) +static int cz_dpm_hw_init(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; int ret = 0; mutex_lock(&adev->pm.mutex); @@ -1282,9 +1290,10 @@ static int cz_dpm_disable(struct amdgpu_device *adev) return 0; } -static int cz_dpm_hw_fini(struct amdgpu_device *adev) +static int cz_dpm_hw_fini(void *handle) { int ret = 0; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; mutex_lock(&adev->pm.mutex); @@ -1305,9 +1314,10 @@ static int cz_dpm_hw_fini(struct amdgpu_device *adev) return ret; } -static int cz_dpm_suspend(struct amdgpu_device *adev) +static int cz_dpm_suspend(void *handle) { int ret = 0; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; if (adev->pm.dpm_enabled) { mutex_lock(&adev->pm.mutex); @@ -1324,9 +1334,10 @@ static int cz_dpm_suspend(struct amdgpu_device *adev) return ret; } -static int cz_dpm_resume(struct amdgpu_device *adev) +static int cz_dpm_resume(void *handle) { int ret 
= 0; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; mutex_lock(&adev->pm.mutex); ret = cz_smu_init(adev); @@ -1368,14 +1379,14 @@ static int cz_dpm_resume(struct amdgpu_device *adev) return 0; } -static int cz_dpm_set_clockgating_state(struct amdgpu_device *adev, - enum amdgpu_clockgating_state state) +static int cz_dpm_set_clockgating_state(void *handle, + enum amd_clockgating_state state) { return 0; } -static int cz_dpm_set_powergating_state(struct amdgpu_device *adev, - enum amdgpu_powergating_state state) +static int cz_dpm_set_powergating_state(void *handle, + enum amd_powergating_state state) { return 0; } @@ -1733,11 +1744,11 @@ static void cz_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate) if (gate) { if (pi->caps_uvd_pg) { /* disable clockgating so we can properly shut down the block */ - ret = amdgpu_set_clockgating_state(adev, AMDGPU_IP_BLOCK_TYPE_UVD, - AMDGPU_CG_STATE_UNGATE); + ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, + AMD_CG_STATE_UNGATE); /* shutdown the UVD block */ - ret = amdgpu_set_powergating_state(adev, AMDGPU_IP_BLOCK_TYPE_UVD, - AMDGPU_PG_STATE_GATE); + ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, + AMD_PG_STATE_GATE); /* XXX: check for errors */ } cz_update_uvd_dpm(adev, gate); @@ -1752,18 +1763,18 @@ static void cz_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate) else cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 0); /* re-init the UVD block */ - ret = amdgpu_set_powergating_state(adev, AMDGPU_IP_BLOCK_TYPE_UVD, - AMDGPU_PG_STATE_UNGATE); + ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, + AMD_PG_STATE_UNGATE); /* enable clockgating. hw will dynamically gate/ungate clocks on the fly */ - ret = amdgpu_set_clockgating_state(adev, AMDGPU_IP_BLOCK_TYPE_UVD, - AMDGPU_CG_STATE_GATE); + ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, + AMD_CG_STATE_GATE); /* XXX: check for errors */ } cz_update_uvd_dpm(adev, gate); } } -const struct amdgpu_ip_funcs cz_dpm_ip_funcs = { +const struct amd_ip_funcs cz_dpm_ip_funcs = { .early_init = cz_dpm_early_init, .late_init = cz_dpm_late_init, .sw_init = cz_dpm_sw_init, diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c index 80d508e64a86..bc751bfbcae2 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c @@ -250,15 +250,18 @@ static void cz_ih_set_rptr(struct amdgpu_device *adev) WREG32(mmIH_RB_RPTR, adev->irq.ih.rptr); } -static int cz_ih_early_init(struct amdgpu_device *adev) +static int cz_ih_early_init(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + cz_ih_set_interrupt_funcs(adev); return 0; } -static int cz_ih_sw_init(struct amdgpu_device *adev) +static int cz_ih_sw_init(void *handle) { int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; r = amdgpu_ih_ring_init(adev, 64 * 1024, false); if (r) @@ -269,17 +272,20 @@ static int cz_ih_sw_init(struct amdgpu_device *adev) return r; } -static int cz_ih_sw_fini(struct amdgpu_device *adev) +static int cz_ih_sw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + amdgpu_irq_fini(adev); amdgpu_ih_ring_fini(adev); return 0; } -static int cz_ih_hw_init(struct amdgpu_device *adev) +static int cz_ih_hw_init(void *handle) { int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; r = cz_ih_irq_init(adev); if (r) @@ -288,25 +294,32 @@ static int cz_ih_hw_init(struct amdgpu_device *adev) return 0; } -static int 
cz_ih_hw_fini(struct amdgpu_device *adev) +static int cz_ih_hw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + cz_ih_irq_disable(adev); return 0; } -static int cz_ih_suspend(struct amdgpu_device *adev) +static int cz_ih_suspend(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + return cz_ih_hw_fini(adev); } -static int cz_ih_resume(struct amdgpu_device *adev) +static int cz_ih_resume(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + return cz_ih_hw_init(adev); } -static bool cz_ih_is_idle(struct amdgpu_device *adev) +static bool cz_ih_is_idle(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 tmp = RREG32(mmSRBM_STATUS); if (REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY)) @@ -315,10 +328,11 @@ static bool cz_ih_is_idle(struct amdgpu_device *adev) return true; } -static int cz_ih_wait_for_idle(struct amdgpu_device *adev) +static int cz_ih_wait_for_idle(void *handle) { unsigned i; u32 tmp; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; for (i = 0; i < adev->usec_timeout; i++) { /* read MC_STATUS */ @@ -330,8 +344,10 @@ static int cz_ih_wait_for_idle(struct amdgpu_device *adev) return -ETIMEDOUT; } -static void cz_ih_print_status(struct amdgpu_device *adev) +static void cz_ih_print_status(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + dev_info(adev->dev, "CZ IH registers\n"); dev_info(adev->dev, " SRBM_STATUS=0x%08X\n", RREG32(mmSRBM_STATUS)); @@ -357,9 +373,10 @@ static void cz_ih_print_status(struct amdgpu_device *adev) RREG32(mmIH_RB_WPTR)); } -static int cz_ih_soft_reset(struct amdgpu_device *adev) +static int cz_ih_soft_reset(void *handle) { u32 srbm_soft_reset = 0; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 tmp = RREG32(mmSRBM_STATUS); if (tmp & SRBM_STATUS__IH_BUSY_MASK) @@ -367,7 +384,7 @@ static int cz_ih_soft_reset(struct amdgpu_device *adev) SOFT_RESET_IH, 1); if (srbm_soft_reset) { - cz_ih_print_status(adev); + cz_ih_print_status((void *)adev); tmp = RREG32(mmSRBM_SOFT_RESET); tmp |= srbm_soft_reset; @@ -384,27 +401,27 @@ static int cz_ih_soft_reset(struct amdgpu_device *adev) /* Wait a little for things to settle down */ udelay(50); - cz_ih_print_status(adev); + cz_ih_print_status((void *)adev); } return 0; } -static int cz_ih_set_clockgating_state(struct amdgpu_device *adev, - enum amdgpu_clockgating_state state) +static int cz_ih_set_clockgating_state(void *handle, + enum amd_clockgating_state state) { // TODO return 0; } -static int cz_ih_set_powergating_state(struct amdgpu_device *adev, - enum amdgpu_powergating_state state) +static int cz_ih_set_powergating_state(void *handle, + enum amd_powergating_state state) { // TODO return 0; } -const struct amdgpu_ip_funcs cz_ih_ip_funcs = { +const struct amd_ip_funcs cz_ih_ip_funcs = { .early_init = cz_ih_early_init, .late_init = NULL, .sw_init = cz_ih_sw_init, diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.h b/drivers/gpu/drm/amd/amdgpu/cz_ih.h index 1bce136876ff..fc4057a2ecb9 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_ih.h +++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.h @@ -24,6 +24,6 @@ #ifndef __CZ_IH_H__ #define __CZ_IH_H__ -extern const struct amdgpu_ip_funcs cz_ih_ip_funcs; +extern const struct amd_ip_funcs cz_ih_ip_funcs; #endif /* __CZ_IH_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 37b96236fe2c..da9a4b9a1f6c 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ 
b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -2864,8 +2864,10 @@ static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index) return 0; } -static int dce_v10_0_early_init(struct amdgpu_device *adev) +static int dce_v10_0_early_init(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + adev->audio_endpt_rreg = &dce_v10_0_audio_endpt_rreg; adev->audio_endpt_wreg = &dce_v10_0_audio_endpt_wreg; @@ -2886,9 +2888,10 @@ static int dce_v10_0_early_init(struct amdgpu_device *adev) return 0; } -static int dce_v10_0_sw_init(struct amdgpu_device *adev) +static int dce_v10_0_sw_init(void *handle) { int r, i; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; for (i = 0; i < adev->mode_info.num_crtc; i++) { r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq); @@ -2950,8 +2953,10 @@ static int dce_v10_0_sw_init(struct amdgpu_device *adev) return r; } -static int dce_v10_0_sw_fini(struct amdgpu_device *adev) +static int dce_v10_0_sw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + kfree(adev->mode_info.bios_hardcoded_edid); drm_kms_helper_poll_fini(adev->ddev); @@ -2966,9 +2971,10 @@ static int dce_v10_0_sw_fini(struct amdgpu_device *adev) return 0; } -static int dce_v10_0_hw_init(struct amdgpu_device *adev) +static int dce_v10_0_hw_init(void *handle) { int i; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; dce_v10_0_init_golden_registers(adev); @@ -2986,9 +2992,10 @@ static int dce_v10_0_hw_init(struct amdgpu_device *adev) return 0; } -static int dce_v10_0_hw_fini(struct amdgpu_device *adev) +static int dce_v10_0_hw_fini(void *handle) { int i; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; dce_v10_0_hpd_fini(adev); @@ -2999,9 +3006,10 @@ static int dce_v10_0_hw_fini(struct amdgpu_device *adev) return 0; } -static int dce_v10_0_suspend(struct amdgpu_device *adev) +static int dce_v10_0_suspend(void *handle) { struct drm_connector *connector; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; drm_kms_helper_poll_disable(adev->ddev); @@ -3017,9 +3025,10 @@ static int dce_v10_0_suspend(struct amdgpu_device *adev) return 0; } -static int dce_v10_0_resume(struct amdgpu_device *adev) +static int dce_v10_0_resume(void *handle) { struct drm_connector *connector; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; dce_v10_0_init_golden_registers(adev); @@ -3051,33 +3060,34 @@ static int dce_v10_0_resume(struct amdgpu_device *adev) return 0; } -static bool dce_v10_0_is_idle(struct amdgpu_device *adev) +static bool dce_v10_0_is_idle(void *handle) { - /* XXX todo */ return true; } -static int dce_v10_0_wait_for_idle(struct amdgpu_device *adev) +static int dce_v10_0_wait_for_idle(void *handle) { - /* XXX todo */ return 0; } -static void dce_v10_0_print_status(struct amdgpu_device *adev) +static void dce_v10_0_print_status(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + dev_info(adev->dev, "DCE 10.x registers\n"); /* XXX todo */ } -static int dce_v10_0_soft_reset(struct amdgpu_device *adev) +static int dce_v10_0_soft_reset(void *handle) { u32 srbm_soft_reset = 0, tmp; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; if (dce_v10_0_is_display_hung(adev)) srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK; if (srbm_soft_reset) { - dce_v10_0_print_status(adev); + dce_v10_0_print_status((void *)adev); tmp = RREG32(mmSRBM_SOFT_RESET); tmp |= srbm_soft_reset; @@ -3093,7 +3103,7 @@ static int dce_v10_0_soft_reset(struct amdgpu_device *adev) 
/* Wait a little for things to settle down */ udelay(50); - dce_v10_0_print_status(adev); + dce_v10_0_print_status((void *)adev); } return 0; } @@ -3449,19 +3459,19 @@ static int dce_v10_0_hpd_irq(struct amdgpu_device *adev, return 0; } -static int dce_v10_0_set_clockgating_state(struct amdgpu_device *adev, - enum amdgpu_clockgating_state state) +static int dce_v10_0_set_clockgating_state(void *handle, + enum amd_clockgating_state state) { return 0; } -static int dce_v10_0_set_powergating_state(struct amdgpu_device *adev, - enum amdgpu_powergating_state state) +static int dce_v10_0_set_powergating_state(void *handle, + enum amd_powergating_state state) { return 0; } -const struct amdgpu_ip_funcs dce_v10_0_ip_funcs = { +const struct amd_ip_funcs dce_v10_0_ip_funcs = { .early_init = dce_v10_0_early_init, .late_init = NULL, .sw_init = dce_v10_0_sw_init, diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h index 72ca20d1793c..1bfa48ddd8a6 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h @@ -24,6 +24,6 @@ #ifndef __DCE_V10_0_H__ #define __DCE_V10_0_H__ -extern const struct amdgpu_ip_funcs dce_v10_0_ip_funcs; +extern const struct amd_ip_funcs dce_v10_0_ip_funcs; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 04a5d4cd75b6..edd9d17ba82a 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -2863,8 +2863,10 @@ static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index) return 0; } -static int dce_v11_0_early_init(struct amdgpu_device *adev) +static int dce_v11_0_early_init(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + adev->audio_endpt_rreg = &dce_v11_0_audio_endpt_rreg; adev->audio_endpt_wreg = &dce_v11_0_audio_endpt_wreg; @@ -2885,9 +2887,10 @@ static int dce_v11_0_early_init(struct amdgpu_device *adev) return 0; } -static int dce_v11_0_sw_init(struct amdgpu_device *adev) +static int dce_v11_0_sw_init(void *handle) { int r, i; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; for (i = 0; i < adev->mode_info.num_crtc; i++) { r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq); @@ -2949,8 +2952,10 @@ static int dce_v11_0_sw_init(struct amdgpu_device *adev) return r; } -static int dce_v11_0_sw_fini(struct amdgpu_device *adev) +static int dce_v11_0_sw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + kfree(adev->mode_info.bios_hardcoded_edid); drm_kms_helper_poll_fini(adev->ddev); @@ -2964,9 +2969,10 @@ static int dce_v11_0_sw_fini(struct amdgpu_device *adev) return 0; } -static int dce_v11_0_hw_init(struct amdgpu_device *adev) +static int dce_v11_0_hw_init(void *handle) { int i; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; dce_v11_0_init_golden_registers(adev); @@ -2984,9 +2990,10 @@ static int dce_v11_0_hw_init(struct amdgpu_device *adev) return 0; } -static int dce_v11_0_hw_fini(struct amdgpu_device *adev) +static int dce_v11_0_hw_fini(void *handle) { int i; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; dce_v11_0_hpd_fini(adev); @@ -2997,9 +3004,10 @@ static int dce_v11_0_hw_fini(struct amdgpu_device *adev) return 0; } -static int dce_v11_0_suspend(struct amdgpu_device *adev) +static int dce_v11_0_suspend(void *handle) { struct drm_connector *connector; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; drm_kms_helper_poll_disable(adev->ddev); @@ -3015,9 +3023,10 @@ 
static int dce_v11_0_suspend(struct amdgpu_device *adev) return 0; } -static int dce_v11_0_resume(struct amdgpu_device *adev) +static int dce_v11_0_resume(void *handle) { struct drm_connector *connector; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; dce_v11_0_init_golden_registers(adev); @@ -3050,33 +3059,34 @@ static int dce_v11_0_resume(struct amdgpu_device *adev) return 0; } -static bool dce_v11_0_is_idle(struct amdgpu_device *adev) +static bool dce_v11_0_is_idle(void *handle) { - /* XXX todo */ return true; } -static int dce_v11_0_wait_for_idle(struct amdgpu_device *adev) +static int dce_v11_0_wait_for_idle(void *handle) { - /* XXX todo */ return 0; } -static void dce_v11_0_print_status(struct amdgpu_device *adev) +static void dce_v11_0_print_status(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + dev_info(adev->dev, "DCE 10.x registers\n"); /* XXX todo */ } -static int dce_v11_0_soft_reset(struct amdgpu_device *adev) +static int dce_v11_0_soft_reset(void *handle) { u32 srbm_soft_reset = 0, tmp; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; if (dce_v11_0_is_display_hung(adev)) srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK; if (srbm_soft_reset) { - dce_v11_0_print_status(adev); + dce_v11_0_print_status((void *)adev); tmp = RREG32(mmSRBM_SOFT_RESET); tmp |= srbm_soft_reset; @@ -3092,7 +3102,7 @@ static int dce_v11_0_soft_reset(struct amdgpu_device *adev) /* Wait a little for things to settle down */ udelay(50); - dce_v11_0_print_status(adev); + dce_v11_0_print_status((void *)adev); } return 0; } @@ -3448,19 +3458,19 @@ static int dce_v11_0_hpd_irq(struct amdgpu_device *adev, return 0; } -static int dce_v11_0_set_clockgating_state(struct amdgpu_device *adev, - enum amdgpu_clockgating_state state) +static int dce_v11_0_set_clockgating_state(void *handle, + enum amd_clockgating_state state) { return 0; } -static int dce_v11_0_set_powergating_state(struct amdgpu_device *adev, - enum amdgpu_powergating_state state) +static int dce_v11_0_set_powergating_state(void *handle, + enum amd_powergating_state state) { return 0; } -const struct amdgpu_ip_funcs dce_v11_0_ip_funcs = { +const struct amd_ip_funcs dce_v11_0_ip_funcs = { .early_init = dce_v11_0_early_init, .late_init = NULL, .sw_init = dce_v11_0_sw_init, diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h index eeb9a56b514a..84e4618f5253 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h @@ -24,6 +24,6 @@ #ifndef __DCE_V11_0_H__ #define __DCE_V11_0_H__ -extern const struct amdgpu_ip_funcs dce_v11_0_ip_funcs; +extern const struct amd_ip_funcs dce_v11_0_ip_funcs; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 9f2ff8d374f3..1d291f1d5b79 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -2794,8 +2794,10 @@ static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index) return 0; } -static int dce_v8_0_early_init(struct amdgpu_device *adev) +static int dce_v8_0_early_init(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + adev->audio_endpt_rreg = &dce_v8_0_audio_endpt_rreg; adev->audio_endpt_wreg = &dce_v8_0_audio_endpt_wreg; @@ -2828,9 +2830,10 @@ static int dce_v8_0_early_init(struct amdgpu_device *adev) return 0; } -static int dce_v8_0_sw_init(struct amdgpu_device *adev) +static int dce_v8_0_sw_init(void *handle) { int r, i; + struct amdgpu_device 
*adev = (struct amdgpu_device *)handle; for (i = 0; i < adev->mode_info.num_crtc; i++) { r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq); @@ -2892,8 +2895,10 @@ static int dce_v8_0_sw_init(struct amdgpu_device *adev) return r; } -static int dce_v8_0_sw_fini(struct amdgpu_device *adev) +static int dce_v8_0_sw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + kfree(adev->mode_info.bios_hardcoded_edid); drm_kms_helper_poll_fini(adev->ddev); @@ -2908,9 +2913,10 @@ static int dce_v8_0_sw_fini(struct amdgpu_device *adev) return 0; } -static int dce_v8_0_hw_init(struct amdgpu_device *adev) +static int dce_v8_0_hw_init(void *handle) { int i; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* init dig PHYs, disp eng pll */ amdgpu_atombios_encoder_init_dig(adev); @@ -2926,9 +2932,10 @@ static int dce_v8_0_hw_init(struct amdgpu_device *adev) return 0; } -static int dce_v8_0_hw_fini(struct amdgpu_device *adev) +static int dce_v8_0_hw_fini(void *handle) { int i; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; dce_v8_0_hpd_fini(adev); @@ -2939,9 +2946,10 @@ static int dce_v8_0_hw_fini(struct amdgpu_device *adev) return 0; } -static int dce_v8_0_suspend(struct amdgpu_device *adev) +static int dce_v8_0_suspend(void *handle) { struct drm_connector *connector; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; drm_kms_helper_poll_disable(adev->ddev); @@ -2957,9 +2965,10 @@ static int dce_v8_0_suspend(struct amdgpu_device *adev) return 0; } -static int dce_v8_0_resume(struct amdgpu_device *adev) +static int dce_v8_0_resume(void *handle) { struct drm_connector *connector; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; amdgpu_atombios_scratch_regs_restore(adev); @@ -2989,33 +2998,34 @@ static int dce_v8_0_resume(struct amdgpu_device *adev) return 0; } -static bool dce_v8_0_is_idle(struct amdgpu_device *adev) +static bool dce_v8_0_is_idle(void *handle) { - /* XXX todo */ return true; } -static int dce_v8_0_wait_for_idle(struct amdgpu_device *adev) +static int dce_v8_0_wait_for_idle(void *handle) { - /* XXX todo */ return 0; } -static void dce_v8_0_print_status(struct amdgpu_device *adev) +static void dce_v8_0_print_status(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + dev_info(adev->dev, "DCE 8.x registers\n"); /* XXX todo */ } -static int dce_v8_0_soft_reset(struct amdgpu_device *adev) +static int dce_v8_0_soft_reset(void *handle) { u32 srbm_soft_reset = 0, tmp; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; if (dce_v8_0_is_display_hung(adev)) srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK; if (srbm_soft_reset) { - dce_v8_0_print_status(adev); + dce_v8_0_print_status((void *)adev); tmp = RREG32(mmSRBM_SOFT_RESET); tmp |= srbm_soft_reset; @@ -3031,7 +3041,7 @@ static int dce_v8_0_soft_reset(struct amdgpu_device *adev) /* Wait a little for things to settle down */ udelay(50); - dce_v8_0_print_status(adev); + dce_v8_0_print_status((void *)adev); } return 0; } @@ -3409,19 +3419,19 @@ static int dce_v8_0_hpd_irq(struct amdgpu_device *adev, } -static int dce_v8_0_set_clockgating_state(struct amdgpu_device *adev, - enum amdgpu_clockgating_state state) +static int dce_v8_0_set_clockgating_state(void *handle, + enum amd_clockgating_state state) { return 0; } -static int dce_v8_0_set_powergating_state(struct amdgpu_device *adev, - enum amdgpu_powergating_state state) +static int dce_v8_0_set_powergating_state(void *handle, + enum amd_powergating_state state) { return 
0; } -const struct amdgpu_ip_funcs dce_v8_0_ip_funcs = { +const struct amd_ip_funcs dce_v8_0_ip_funcs = { .early_init = dce_v8_0_early_init, .late_init = NULL, .sw_init = dce_v8_0_sw_init, diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h index 3a0a73b41041..77016852b252 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h @@ -24,6 +24,6 @@ #ifndef __DCE_V8_0_H__ #define __DCE_V8_0_H__ -extern const struct amdgpu_ip_funcs dce_v8_0_ip_funcs; +extern const struct amd_ip_funcs dce_v8_0_ip_funcs; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 5315c13a8921..0057699cb8fa 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -4682,8 +4682,9 @@ static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring, amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base)); } -static int gfx_v7_0_early_init(struct amdgpu_device *adev) +static int gfx_v7_0_early_init(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; adev->gfx.num_gfx_rings = GFX7_NUM_GFX_RINGS; adev->gfx.num_compute_rings = GFX7_NUM_COMPUTE_RINGS; @@ -4694,9 +4695,10 @@ static int gfx_v7_0_early_init(struct amdgpu_device *adev) return 0; } -static int gfx_v7_0_sw_init(struct amdgpu_device *adev) +static int gfx_v7_0_sw_init(void *handle) { struct amdgpu_ring *ring; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; int i, r; /* EOP Event */ @@ -4805,9 +4807,10 @@ static int gfx_v7_0_sw_init(struct amdgpu_device *adev) return r; } -static int gfx_v7_0_sw_fini(struct amdgpu_device *adev) +static int gfx_v7_0_sw_fini(void *handle) { int i; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; amdgpu_bo_unref(&adev->gds.oa_gfx_bo); amdgpu_bo_unref(&adev->gds.gws_gfx_bo); @@ -4827,9 +4830,10 @@ static int gfx_v7_0_sw_fini(struct amdgpu_device *adev) return 0; } -static int gfx_v7_0_hw_init(struct amdgpu_device *adev) +static int gfx_v7_0_hw_init(void *handle) { int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; gfx_v7_0_gpu_init(adev); @@ -4845,8 +4849,10 @@ static int gfx_v7_0_hw_init(struct amdgpu_device *adev) return r; } -static int gfx_v7_0_hw_fini(struct amdgpu_device *adev) +static int gfx_v7_0_hw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + gfx_v7_0_cp_enable(adev, false); gfx_v7_0_rlc_stop(adev); gfx_v7_0_fini_pg(adev); @@ -4854,28 +4860,35 @@ static int gfx_v7_0_hw_fini(struct amdgpu_device *adev) return 0; } -static int gfx_v7_0_suspend(struct amdgpu_device *adev) +static int gfx_v7_0_suspend(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + return gfx_v7_0_hw_fini(adev); } -static int gfx_v7_0_resume(struct amdgpu_device *adev) +static int gfx_v7_0_resume(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + return gfx_v7_0_hw_init(adev); } -static bool gfx_v7_0_is_idle(struct amdgpu_device *adev) +static bool gfx_v7_0_is_idle(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK) return false; else return true; } -static int gfx_v7_0_wait_for_idle(struct amdgpu_device *adev) +static int gfx_v7_0_wait_for_idle(void *handle) { unsigned i; u32 tmp; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; for (i = 0; i < adev->usec_timeout; i++) { /* read MC_STATUS */ @@ -4888,9 +4901,10 
@@ static int gfx_v7_0_wait_for_idle(struct amdgpu_device *adev) return -ETIMEDOUT; } -static void gfx_v7_0_print_status(struct amdgpu_device *adev) +static void gfx_v7_0_print_status(void *handle) { int i; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; dev_info(adev->dev, "GFX 7.x registers\n"); dev_info(adev->dev, " GRBM_STATUS=0x%08X\n", @@ -5147,10 +5161,11 @@ static void gfx_v7_0_print_status(struct amdgpu_device *adev) mutex_unlock(&adev->srbm_mutex); } -static int gfx_v7_0_soft_reset(struct amdgpu_device *adev) +static int gfx_v7_0_soft_reset(void *handle) { u32 grbm_soft_reset = 0, srbm_soft_reset = 0; u32 tmp; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* GRBM_STATUS */ tmp = RREG32(mmGRBM_STATUS); @@ -5179,7 +5194,7 @@ static int gfx_v7_0_soft_reset(struct amdgpu_device *adev) srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK; if (grbm_soft_reset || srbm_soft_reset) { - gfx_v7_0_print_status(adev); + gfx_v7_0_print_status((void *)adev); /* disable CG/PG */ gfx_v7_0_fini_pg(adev); gfx_v7_0_update_cg(adev, false); @@ -5222,7 +5237,7 @@ static int gfx_v7_0_soft_reset(struct amdgpu_device *adev) } /* Wait a little for things to settle down */ udelay(50); - gfx_v7_0_print_status(adev); + gfx_v7_0_print_status((void *)adev); } return 0; } @@ -5425,12 +5440,13 @@ static int gfx_v7_0_priv_inst_irq(struct amdgpu_device *adev, return 0; } -static int gfx_v7_0_set_clockgating_state(struct amdgpu_device *adev, - enum amdgpu_clockgating_state state) +static int gfx_v7_0_set_clockgating_state(void *handle, + enum amd_clockgating_state state) { bool gate = false; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (state == AMDGPU_CG_STATE_GATE) + if (state == AMD_CG_STATE_GATE) gate = true; gfx_v7_0_enable_gui_idle_interrupt(adev, false); @@ -5447,12 +5463,13 @@ static int gfx_v7_0_set_clockgating_state(struct amdgpu_device *adev, return 0; } -static int gfx_v7_0_set_powergating_state(struct amdgpu_device *adev, - enum amdgpu_powergating_state state) +static int gfx_v7_0_set_powergating_state(void *handle, + enum amd_powergating_state state) { bool gate = false; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (state == AMDGPU_PG_STATE_GATE) + if (state == AMD_PG_STATE_GATE) gate = true; if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG | @@ -5471,7 +5488,7 @@ static int gfx_v7_0_set_powergating_state(struct amdgpu_device *adev, return 0; } -const struct amdgpu_ip_funcs gfx_v7_0_ip_funcs = { +const struct amd_ip_funcs gfx_v7_0_ip_funcs = { .early_init = gfx_v7_0_early_init, .late_init = NULL, .sw_init = gfx_v7_0_sw_init, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h index 668b91a89e1e..c04bfbabfc88 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h @@ -24,7 +24,7 @@ #ifndef __GFX_V7_0_H__ #define __GFX_V7_0_H__ -extern const struct amdgpu_ip_funcs gfx_v7_0_ip_funcs; +extern const struct amd_ip_funcs gfx_v7_0_ip_funcs; /* XXX these shouldn't be exported */ void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 4597256cc6e2..a7d687da10d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -784,10 +784,11 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev) return 0; } -static int gfx_v8_0_sw_init(struct amdgpu_device *adev) +static int gfx_v8_0_sw_init(void *handle) { int i, r; 
struct amdgpu_ring *ring; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* EOP Event */ r = amdgpu_irq_add_id(adev, 181, &adev->gfx.eop_irq); @@ -897,9 +898,10 @@ static int gfx_v8_0_sw_init(struct amdgpu_device *adev) return 0; } -static int gfx_v8_0_sw_fini(struct amdgpu_device *adev) +static int gfx_v8_0_sw_fini(void *handle) { int i; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; amdgpu_bo_unref(&adev->gds.oa_gfx_bo); amdgpu_bo_unref(&adev->gds.gws_gfx_bo); @@ -3148,9 +3150,10 @@ static void gfx_v8_0_cp_enable(struct amdgpu_device *adev, bool enable) gfx_v8_0_cp_compute_enable(adev, enable); } -static int gfx_v8_0_hw_init(struct amdgpu_device *adev) +static int gfx_v8_0_hw_init(void *handle) { int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; gfx_v8_0_init_golden_registers(adev); @@ -3167,8 +3170,10 @@ static int gfx_v8_0_hw_init(struct amdgpu_device *adev) return r; } -static int gfx_v8_0_hw_fini(struct amdgpu_device *adev) +static int gfx_v8_0_hw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + gfx_v8_0_cp_enable(adev, false); gfx_v8_0_rlc_stop(adev); gfx_v8_0_cp_compute_fini(adev); @@ -3176,28 +3181,35 @@ static int gfx_v8_0_hw_fini(struct amdgpu_device *adev) return 0; } -static int gfx_v8_0_suspend(struct amdgpu_device *adev) +static int gfx_v8_0_suspend(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + return gfx_v8_0_hw_fini(adev); } -static int gfx_v8_0_resume(struct amdgpu_device *adev) +static int gfx_v8_0_resume(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + return gfx_v8_0_hw_init(adev); } -static bool gfx_v8_0_is_idle(struct amdgpu_device *adev) +static bool gfx_v8_0_is_idle(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (REG_GET_FIELD(RREG32(mmGRBM_STATUS), GRBM_STATUS, GUI_ACTIVE)) return false; else return true; } -static int gfx_v8_0_wait_for_idle(struct amdgpu_device *adev) +static int gfx_v8_0_wait_for_idle(void *handle) { unsigned i; u32 tmp; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; for (i = 0; i < adev->usec_timeout; i++) { /* read MC_STATUS */ @@ -3210,9 +3222,10 @@ static int gfx_v8_0_wait_for_idle(struct amdgpu_device *adev) return -ETIMEDOUT; } -static void gfx_v8_0_print_status(struct amdgpu_device *adev) +static void gfx_v8_0_print_status(void *handle) { int i; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; dev_info(adev->dev, "GFX 8.x registers\n"); dev_info(adev->dev, " GRBM_STATUS=0x%08X\n", @@ -3398,10 +3411,11 @@ static void gfx_v8_0_print_status(struct amdgpu_device *adev) mutex_unlock(&adev->srbm_mutex); } -static int gfx_v8_0_soft_reset(struct amdgpu_device *adev) +static int gfx_v8_0_soft_reset(void *handle) { u32 grbm_soft_reset = 0, srbm_soft_reset = 0; u32 tmp; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* GRBM_STATUS */ tmp = RREG32(mmGRBM_STATUS); @@ -3437,7 +3451,7 @@ static int gfx_v8_0_soft_reset(struct amdgpu_device *adev) SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1); if (grbm_soft_reset || srbm_soft_reset) { - gfx_v8_0_print_status(adev); + gfx_v8_0_print_status((void *)adev); /* stop the rlc */ gfx_v8_0_rlc_stop(adev); @@ -3476,7 +3490,7 @@ static int gfx_v8_0_soft_reset(struct amdgpu_device *adev) } /* Wait a little for things to settle down */ udelay(50); - gfx_v8_0_print_status(adev); + gfx_v8_0_print_status((void *)adev); } return 0; } @@ -3549,8 +3563,9 @@ static void 
gfx_v8_0_ring_emit_gds_switch(struct amdgpu_ring *ring, amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base)); } -static int gfx_v8_0_early_init(struct amdgpu_device *adev) +static int gfx_v8_0_early_init(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; adev->gfx.num_gfx_rings = GFX8_NUM_GFX_RINGS; adev->gfx.num_compute_rings = GFX8_NUM_COMPUTE_RINGS; @@ -3561,14 +3576,14 @@ static int gfx_v8_0_early_init(struct amdgpu_device *adev) return 0; } -static int gfx_v8_0_set_powergating_state(struct amdgpu_device *adev, - enum amdgpu_powergating_state state) +static int gfx_v8_0_set_powergating_state(void *handle, + enum amd_powergating_state state) { return 0; } -static int gfx_v8_0_set_clockgating_state(struct amdgpu_device *adev, - enum amdgpu_clockgating_state state) +static int gfx_v8_0_set_clockgating_state(void *handle, + enum amd_clockgating_state state) { return 0; } @@ -4116,7 +4131,7 @@ static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev, return 0; } -const struct amdgpu_ip_funcs gfx_v8_0_ip_funcs = { +const struct amd_ip_funcs gfx_v8_0_ip_funcs = { .early_init = gfx_v8_0_early_init, .late_init = NULL, .sw_init = gfx_v8_0_sw_init, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h index be8a5f8e176e..021e05193cb9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h @@ -24,7 +24,7 @@ #ifndef __GFX_V8_0_H__ #define __GFX_V8_0_H__ -extern const struct amdgpu_ip_funcs gfx_v8_0_ip_funcs; +extern const struct amd_ip_funcs gfx_v8_0_ip_funcs; uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev); void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index ab096723a588..01cd6b207d26 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -812,8 +812,10 @@ static void gmc_v7_0_enable_hdp_ls(struct amdgpu_device *adev, WREG32(mmHDP_MEM_POWER_LS, data); } -static int gmc_v7_0_early_init(struct amdgpu_device *adev) +static int gmc_v7_0_early_init(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + gmc_v7_0_set_gart_funcs(adev); gmc_v7_0_set_irq_funcs(adev); @@ -832,10 +834,11 @@ static int gmc_v7_0_early_init(struct amdgpu_device *adev) return 0; } -static int gmc_v7_0_sw_init(struct amdgpu_device *adev) +static int gmc_v7_0_sw_init(void *handle) { int r; int dma_bits; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; r = amdgpu_gem_init(adev); if (r) @@ -911,9 +914,10 @@ static int gmc_v7_0_sw_init(struct amdgpu_device *adev) return r; } -static int gmc_v7_0_sw_fini(struct amdgpu_device *adev) +static int gmc_v7_0_sw_fini(void *handle) { int i; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; if (adev->vm_manager.enabled) { for (i = 0; i < AMDGPU_NUM_VM; ++i) @@ -928,9 +932,10 @@ static int gmc_v7_0_sw_fini(struct amdgpu_device *adev) return 0; } -static int gmc_v7_0_hw_init(struct amdgpu_device *adev) +static int gmc_v7_0_hw_init(void *handle) { int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; gmc_v7_0_mc_program(adev); @@ -949,16 +954,19 @@ static int gmc_v7_0_hw_init(struct amdgpu_device *adev) return r; } -static int gmc_v7_0_hw_fini(struct amdgpu_device *adev) +static int gmc_v7_0_hw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + gmc_v7_0_gart_disable(adev); return 0; } 
-static int gmc_v7_0_suspend(struct amdgpu_device *adev) +static int gmc_v7_0_suspend(void *handle) { int i; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; if (adev->vm_manager.enabled) { for (i = 0; i < AMDGPU_NUM_VM; ++i) @@ -971,9 +979,10 @@ static int gmc_v7_0_suspend(struct amdgpu_device *adev) return 0; } -static int gmc_v7_0_resume(struct amdgpu_device *adev) +static int gmc_v7_0_resume(void *handle) { int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; r = gmc_v7_0_hw_init(adev); if (r) @@ -991,8 +1000,9 @@ static int gmc_v7_0_resume(struct amdgpu_device *adev) return r; } -static bool gmc_v7_0_is_idle(struct amdgpu_device *adev) +static bool gmc_v7_0_is_idle(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 tmp = RREG32(mmSRBM_STATUS); if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | @@ -1002,10 +1012,11 @@ static bool gmc_v7_0_is_idle(struct amdgpu_device *adev) return true; } -static int gmc_v7_0_wait_for_idle(struct amdgpu_device *adev) +static int gmc_v7_0_wait_for_idle(void *handle) { unsigned i; u32 tmp; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; for (i = 0; i < adev->usec_timeout; i++) { /* read MC_STATUS */ @@ -1022,9 +1033,10 @@ static int gmc_v7_0_wait_for_idle(struct amdgpu_device *adev) } -static void gmc_v7_0_print_status(struct amdgpu_device *adev) +static void gmc_v7_0_print_status(void *handle) { int i, j; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; dev_info(adev->dev, "GMC 8.x registers\n"); dev_info(adev->dev, " SRBM_STATUS=0x%08X\n", @@ -1129,8 +1141,9 @@ static void gmc_v7_0_print_status(struct amdgpu_device *adev) RREG32(mmBIF_FB_EN)); } -static int gmc_v7_0_soft_reset(struct amdgpu_device *adev) +static int gmc_v7_0_soft_reset(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_mode_mc_save save; u32 srbm_soft_reset = 0; u32 tmp = RREG32(mmSRBM_STATUS); @@ -1147,7 +1160,7 @@ static int gmc_v7_0_soft_reset(struct amdgpu_device *adev) } if (srbm_soft_reset) { - gmc_v7_0_print_status(adev); + gmc_v7_0_print_status((void *)adev); gmc_v7_0_mc_stop(adev, &save); if (gmc_v7_0_wait_for_idle(adev)) { @@ -1173,7 +1186,7 @@ static int gmc_v7_0_soft_reset(struct amdgpu_device *adev) gmc_v7_0_mc_resume(adev, &save); udelay(50); - gmc_v7_0_print_status(adev); + gmc_v7_0_print_status((void *)adev); } return 0; @@ -1242,12 +1255,13 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev, return 0; } -static int gmc_v7_0_set_clockgating_state(struct amdgpu_device *adev, - enum amdgpu_clockgating_state state) +static int gmc_v7_0_set_clockgating_state(void *handle, + enum amd_clockgating_state state) { bool gate = false; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (state == AMDGPU_CG_STATE_GATE) + if (state == AMD_CG_STATE_GATE) gate = true; if (!(adev->flags & AMDGPU_IS_APU)) { @@ -1261,13 +1275,13 @@ static int gmc_v7_0_set_clockgating_state(struct amdgpu_device *adev, return 0; } -static int gmc_v7_0_set_powergating_state(struct amdgpu_device *adev, - enum amdgpu_powergating_state state) +static int gmc_v7_0_set_powergating_state(void *handle, + enum amd_powergating_state state) { return 0; } -const struct amdgpu_ip_funcs gmc_v7_0_ip_funcs = { +const struct amd_ip_funcs gmc_v7_0_ip_funcs = { .early_init = gmc_v7_0_early_init, .late_init = NULL, .sw_init = gmc_v7_0_sw_init, diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h 
index ab1a2fa1afcd..36fcbbc46ada 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h @@ -24,7 +24,7 @@ #ifndef __GMC_V7_0_H__ #define __GMC_V7_0_H__ -extern const struct amdgpu_ip_funcs gmc_v7_0_ip_funcs; +extern const struct amd_ip_funcs gmc_v7_0_ip_funcs; /* XXX these shouldn't be exported */ void gmc_v7_0_mc_stop(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index f816165acd70..675483a612c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -786,8 +786,10 @@ static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev, "write" : "read", block, mc_client, mc_id); } -static int gmc_v8_0_early_init(struct amdgpu_device *adev) +static int gmc_v8_0_early_init(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + gmc_v8_0_set_gart_funcs(adev); gmc_v8_0_set_irq_funcs(adev); @@ -806,10 +808,11 @@ static int gmc_v8_0_early_init(struct amdgpu_device *adev) return 0; } -static int gmc_v8_0_sw_init(struct amdgpu_device *adev) +static int gmc_v8_0_sw_init(void *handle) { int r; int dma_bits; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; r = amdgpu_gem_init(adev); if (r) @@ -885,9 +888,10 @@ static int gmc_v8_0_sw_init(struct amdgpu_device *adev) return r; } -static int gmc_v8_0_sw_fini(struct amdgpu_device *adev) +static int gmc_v8_0_sw_fini(void *handle) { int i; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; if (adev->vm_manager.enabled) { for (i = 0; i < AMDGPU_NUM_VM; ++i) @@ -902,9 +906,10 @@ static int gmc_v8_0_sw_fini(struct amdgpu_device *adev) return 0; } -static int gmc_v8_0_hw_init(struct amdgpu_device *adev) +static int gmc_v8_0_hw_init(void *handle) { int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; gmc_v8_0_init_golden_registers(adev); @@ -925,16 +930,19 @@ static int gmc_v8_0_hw_init(struct amdgpu_device *adev) return r; } -static int gmc_v8_0_hw_fini(struct amdgpu_device *adev) +static int gmc_v8_0_hw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + gmc_v8_0_gart_disable(adev); return 0; } -static int gmc_v8_0_suspend(struct amdgpu_device *adev) +static int gmc_v8_0_suspend(void *handle) { int i; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; if (adev->vm_manager.enabled) { for (i = 0; i < AMDGPU_NUM_VM; ++i) @@ -947,9 +955,10 @@ static int gmc_v8_0_suspend(struct amdgpu_device *adev) return 0; } -static int gmc_v8_0_resume(struct amdgpu_device *adev) +static int gmc_v8_0_resume(void *handle) { int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; r = gmc_v8_0_hw_init(adev); if (r) @@ -967,8 +976,9 @@ static int gmc_v8_0_resume(struct amdgpu_device *adev) return r; } -static bool gmc_v8_0_is_idle(struct amdgpu_device *adev) +static bool gmc_v8_0_is_idle(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 tmp = RREG32(mmSRBM_STATUS); if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | @@ -978,10 +988,11 @@ static bool gmc_v8_0_is_idle(struct amdgpu_device *adev) return true; } -static int gmc_v8_0_wait_for_idle(struct amdgpu_device *adev) +static int gmc_v8_0_wait_for_idle(void *handle) { unsigned i; u32 tmp; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; for (i = 0; i < adev->usec_timeout; i++) { /* read MC_STATUS */ @@ -999,9 +1010,10 @@ static int gmc_v8_0_wait_for_idle(struct amdgpu_device *adev) 
} -static void gmc_v8_0_print_status(struct amdgpu_device *adev) +static void gmc_v8_0_print_status(void *handle) { int i, j; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; dev_info(adev->dev, "GMC 8.x registers\n"); dev_info(adev->dev, " SRBM_STATUS=0x%08X\n", @@ -1103,10 +1115,11 @@ static void gmc_v8_0_print_status(struct amdgpu_device *adev) RREG32(mmBIF_FB_EN)); } -static int gmc_v8_0_soft_reset(struct amdgpu_device *adev) +static int gmc_v8_0_soft_reset(void *handle) { struct amdgpu_mode_mc_save save; u32 srbm_soft_reset = 0; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 tmp = RREG32(mmSRBM_STATUS); if (tmp & SRBM_STATUS__VMC_BUSY_MASK) @@ -1121,7 +1134,7 @@ static int gmc_v8_0_soft_reset(struct amdgpu_device *adev) } if (srbm_soft_reset) { - gmc_v8_0_print_status(adev); + gmc_v8_0_print_status((void *)adev); gmc_v8_0_mc_stop(adev, &save); if (gmc_v8_0_wait_for_idle(adev)) { @@ -1147,7 +1160,7 @@ static int gmc_v8_0_soft_reset(struct amdgpu_device *adev) gmc_v8_0_mc_resume(adev, &save); udelay(50); - gmc_v8_0_print_status(adev); + gmc_v8_0_print_status((void *)adev); } return 0; @@ -1217,21 +1230,19 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev, return 0; } -static int gmc_v8_0_set_clockgating_state(struct amdgpu_device *adev, - enum amdgpu_clockgating_state state) +static int gmc_v8_0_set_clockgating_state(void *handle, + enum amd_clockgating_state state) { - /* XXX handled via the smc on VI */ - return 0; } -static int gmc_v8_0_set_powergating_state(struct amdgpu_device *adev, - enum amdgpu_powergating_state state) +static int gmc_v8_0_set_powergating_state(void *handle, + enum amd_powergating_state state) { return 0; } -const struct amdgpu_ip_funcs gmc_v8_0_ip_funcs = { +const struct amd_ip_funcs gmc_v8_0_ip_funcs = { .early_init = gmc_v8_0_early_init, .late_init = NULL, .sw_init = gmc_v8_0_sw_init, diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h index 2dd7f809d4e1..973436086b38 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h @@ -24,7 +24,7 @@ #ifndef __GMC_V8_0_H__ #define __GMC_V8_0_H__ -extern const struct amdgpu_ip_funcs gmc_v8_0_ip_funcs; +extern const struct amd_ip_funcs gmc_v8_0_ip_funcs; /* XXX these shouldn't be exported */ void gmc_v8_0_mc_stop(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c index 0688b88cb170..4b773f29167a 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c @@ -30,8 +30,10 @@ MODULE_FIRMWARE("amdgpu/topaz_smc.bin"); static void iceland_dpm_set_funcs(struct amdgpu_device *adev); -static int iceland_dpm_early_init(struct amdgpu_device *adev) +static int iceland_dpm_early_init(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + iceland_dpm_set_funcs(adev); return 0; @@ -56,9 +58,10 @@ out: return err; } -static int iceland_dpm_sw_init(struct amdgpu_device *adev) +static int iceland_dpm_sw_init(void *handle) { int ret; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; ret = iceland_dpm_init_microcode(adev); if (ret) @@ -67,14 +70,15 @@ static int iceland_dpm_sw_init(struct amdgpu_device *adev) return 0; } -static int iceland_dpm_sw_fini(struct amdgpu_device *adev) +static int iceland_dpm_sw_fini(void *handle) { return 0; } -static int iceland_dpm_hw_init(struct amdgpu_device *adev) +static int iceland_dpm_hw_init(void *handle) { int ret; + struct 
amdgpu_device *adev = (struct amdgpu_device *)handle; mutex_lock(&adev->pm.mutex); @@ -99,41 +103,47 @@ fail: return -EINVAL; } -static int iceland_dpm_hw_fini(struct amdgpu_device *adev) +static int iceland_dpm_hw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + mutex_lock(&adev->pm.mutex); iceland_smu_fini(adev); mutex_unlock(&adev->pm.mutex); return 0; } -static int iceland_dpm_suspend(struct amdgpu_device *adev) +static int iceland_dpm_suspend(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + iceland_dpm_hw_fini(adev); return 0; } -static int iceland_dpm_resume(struct amdgpu_device *adev) +static int iceland_dpm_resume(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + iceland_dpm_hw_init(adev); return 0; } -static int iceland_dpm_set_clockgating_state(struct amdgpu_device *adev, - enum amdgpu_clockgating_state state) +static int iceland_dpm_set_clockgating_state(void *handle, + enum amd_clockgating_state state) { return 0; } -static int iceland_dpm_set_powergating_state(struct amdgpu_device *adev, - enum amdgpu_powergating_state state) +static int iceland_dpm_set_powergating_state(void *handle, + enum amd_powergating_state state) { return 0; } -const struct amdgpu_ip_funcs iceland_dpm_ip_funcs = { +const struct amd_ip_funcs iceland_dpm_ip_funcs = { .early_init = iceland_dpm_early_init, .late_init = NULL, .sw_init = iceland_dpm_sw_init, diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c index 2de8adfac471..779532d350ff 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c @@ -250,15 +250,18 @@ static void iceland_ih_set_rptr(struct amdgpu_device *adev) WREG32(mmIH_RB_RPTR, adev->irq.ih.rptr); } -static int iceland_ih_early_init(struct amdgpu_device *adev) +static int iceland_ih_early_init(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + iceland_ih_set_interrupt_funcs(adev); return 0; } -static int iceland_ih_sw_init(struct amdgpu_device *adev) +static int iceland_ih_sw_init(void *handle) { int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; r = amdgpu_ih_ring_init(adev, 64 * 1024, false); if (r) @@ -269,17 +272,20 @@ static int iceland_ih_sw_init(struct amdgpu_device *adev) return r; } -static int iceland_ih_sw_fini(struct amdgpu_device *adev) +static int iceland_ih_sw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + amdgpu_irq_fini(adev); amdgpu_ih_ring_fini(adev); return 0; } -static int iceland_ih_hw_init(struct amdgpu_device *adev) +static int iceland_ih_hw_init(void *handle) { int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; r = iceland_ih_irq_init(adev); if (r) @@ -288,25 +294,32 @@ static int iceland_ih_hw_init(struct amdgpu_device *adev) return 0; } -static int iceland_ih_hw_fini(struct amdgpu_device *adev) +static int iceland_ih_hw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + iceland_ih_irq_disable(adev); return 0; } -static int iceland_ih_suspend(struct amdgpu_device *adev) +static int iceland_ih_suspend(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + return iceland_ih_hw_fini(adev); } -static int iceland_ih_resume(struct amdgpu_device *adev) +static int iceland_ih_resume(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + return iceland_ih_hw_init(adev); } -static bool 
iceland_ih_is_idle(struct amdgpu_device *adev) +static bool iceland_ih_is_idle(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 tmp = RREG32(mmSRBM_STATUS); if (REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY)) @@ -315,10 +328,11 @@ static bool iceland_ih_is_idle(struct amdgpu_device *adev) return true; } -static int iceland_ih_wait_for_idle(struct amdgpu_device *adev) +static int iceland_ih_wait_for_idle(void *handle) { unsigned i; u32 tmp; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; for (i = 0; i < adev->usec_timeout; i++) { /* read MC_STATUS */ @@ -330,8 +344,10 @@ static int iceland_ih_wait_for_idle(struct amdgpu_device *adev) return -ETIMEDOUT; } -static void iceland_ih_print_status(struct amdgpu_device *adev) +static void iceland_ih_print_status(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + dev_info(adev->dev, "ICELAND IH registers\n"); dev_info(adev->dev, " SRBM_STATUS=0x%08X\n", RREG32(mmSRBM_STATUS)); @@ -357,9 +373,10 @@ static void iceland_ih_print_status(struct amdgpu_device *adev) RREG32(mmIH_RB_WPTR)); } -static int iceland_ih_soft_reset(struct amdgpu_device *adev) +static int iceland_ih_soft_reset(void *handle) { u32 srbm_soft_reset = 0; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 tmp = RREG32(mmSRBM_STATUS); if (tmp & SRBM_STATUS__IH_BUSY_MASK) @@ -367,7 +384,7 @@ static int iceland_ih_soft_reset(struct amdgpu_device *adev) SOFT_RESET_IH, 1); if (srbm_soft_reset) { - iceland_ih_print_status(adev); + iceland_ih_print_status((void *)adev); tmp = RREG32(mmSRBM_SOFT_RESET); tmp |= srbm_soft_reset; @@ -384,27 +401,25 @@ static int iceland_ih_soft_reset(struct amdgpu_device *adev) /* Wait a little for things to settle down */ udelay(50); - iceland_ih_print_status(adev); + iceland_ih_print_status((void *)adev); } return 0; } -static int iceland_ih_set_clockgating_state(struct amdgpu_device *adev, - enum amdgpu_clockgating_state state) +static int iceland_ih_set_clockgating_state(void *handle, + enum amd_clockgating_state state) { - // TODO return 0; } -static int iceland_ih_set_powergating_state(struct amdgpu_device *adev, - enum amdgpu_powergating_state state) +static int iceland_ih_set_powergating_state(void *handle, + enum amd_powergating_state state) { - // TODO return 0; } -const struct amdgpu_ip_funcs iceland_ih_ip_funcs = { +const struct amd_ip_funcs iceland_ih_ip_funcs = { .early_init = iceland_ih_early_init, .late_init = NULL, .sw_init = iceland_ih_sw_init, diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.h b/drivers/gpu/drm/amd/amdgpu/iceland_ih.h index d001895eb93b..57558cddfbcb 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.h +++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.h @@ -24,6 +24,6 @@ #ifndef __ICELAND_IH_H__ #define __ICELAND_IH_H__ -extern const struct amdgpu_ip_funcs iceland_ih_ip_funcs; +extern const struct amd_ip_funcs iceland_ih_ip_funcs; #endif /* __ICELAND_IH_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index cd902419e6a1..94ec04a9c4d5 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c @@ -1552,8 +1552,8 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev, if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) { kv_dpm_powergate_vce(adev, false); /* turn the clocks on when encoding */ - ret = amdgpu_set_clockgating_state(adev, AMDGPU_IP_BLOCK_TYPE_VCE, - AMDGPU_CG_STATE_UNGATE); + ret = amdgpu_set_clockgating_state(adev, 
AMD_IP_BLOCK_TYPE_VCE, + AMD_CG_STATE_UNGATE); if (ret) return ret; if (pi->caps_stable_p_state) @@ -1579,8 +1579,8 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev, } else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) { kv_enable_vce_dpm(adev, false); /* turn the clocks off when not encoding */ - ret = amdgpu_set_clockgating_state(adev, AMDGPU_IP_BLOCK_TYPE_VCE, - AMDGPU_CG_STATE_GATE); + ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_CG_STATE_GATE); if (ret) return ret; kv_dpm_powergate_vce(adev, true); @@ -1697,11 +1697,11 @@ static void kv_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate) if (gate) { if (pi->caps_uvd_pg) { /* disable clockgating so we can properly shut down the block */ - ret = amdgpu_set_clockgating_state(adev, AMDGPU_IP_BLOCK_TYPE_UVD, - AMDGPU_CG_STATE_UNGATE); + ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, + AMD_CG_STATE_UNGATE); /* shutdown the UVD block */ - ret = amdgpu_set_powergating_state(adev, AMDGPU_IP_BLOCK_TYPE_UVD, - AMDGPU_PG_STATE_GATE); + ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, + AMD_PG_STATE_GATE); /* XXX: check for errors */ } kv_update_uvd_dpm(adev, gate); @@ -1713,11 +1713,11 @@ static void kv_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate) /* power on the UVD block */ amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON); /* re-init the UVD block */ - ret = amdgpu_set_powergating_state(adev, AMDGPU_IP_BLOCK_TYPE_UVD, - AMDGPU_PG_STATE_UNGATE); + ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, + AMD_PG_STATE_UNGATE); /* enable clockgating. hw will dynamically gate/ungate clocks on the fly */ - ret = amdgpu_set_clockgating_state(adev, AMDGPU_IP_BLOCK_TYPE_UVD, - AMDGPU_CG_STATE_GATE); + ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, + AMD_CG_STATE_GATE); /* XXX: check for errors */ } kv_update_uvd_dpm(adev, gate); @@ -1737,8 +1737,8 @@ static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate) if (gate) { if (pi->caps_vce_pg) { /* shutdown the VCE block */ - ret = amdgpu_set_powergating_state(adev, AMDGPU_IP_BLOCK_TYPE_VCE, - AMDGPU_PG_STATE_GATE); + ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_GATE); /* XXX: check for errors */ /* power off the VCE block */ amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF); @@ -1748,8 +1748,8 @@ static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate) /* power on the VCE block */ amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); /* re-init the VCE block */ - ret = amdgpu_set_powergating_state(adev, AMDGPU_IP_BLOCK_TYPE_VCE, - AMDGPU_PG_STATE_UNGATE); + ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_UNGATE); /* XXX: check for errors */ } } @@ -2981,20 +2981,21 @@ static int kv_dpm_get_temp(struct amdgpu_device *adev) return actual_temp; } -static int kv_dpm_early_init(struct amdgpu_device *adev) +static int kv_dpm_early_init(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + kv_dpm_set_dpm_funcs(adev); kv_dpm_set_irq_funcs(adev); return 0; } -static int kv_dpm_late_init(struct amdgpu_device *adev) +static int kv_dpm_late_init(void *handle) { - if (!amdgpu_dpm) - return 0; - /* powerdown unused blocks for now */ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + kv_dpm_powergate_acp(adev, true); kv_dpm_powergate_samu(adev, true); kv_dpm_powergate_vce(adev, true); @@ -3003,9 +3004,10 @@ static int 
kv_dpm_late_init(struct amdgpu_device *adev) return 0; } -static int kv_dpm_sw_init(struct amdgpu_device *adev) +static int kv_dpm_sw_init(void *handle) { int ret; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; ret = amdgpu_irq_add_id(adev, 230, &adev->pm.dpm.thermal.irq); if (ret) @@ -3051,8 +3053,10 @@ dpm_failed: return ret; } -static int kv_dpm_sw_fini(struct amdgpu_device *adev) +static int kv_dpm_sw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + mutex_lock(&adev->pm.mutex); amdgpu_pm_sysfs_fini(adev); kv_dpm_fini(adev); @@ -3061,12 +3065,10 @@ static int kv_dpm_sw_fini(struct amdgpu_device *adev) return 0; } -static int kv_dpm_hw_init(struct amdgpu_device *adev) +static int kv_dpm_hw_init(void *handle) { int ret; - - if (!amdgpu_dpm) - return 0; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; mutex_lock(&adev->pm.mutex); kv_dpm_setup_asic(adev); @@ -3080,8 +3082,10 @@ static int kv_dpm_hw_init(struct amdgpu_device *adev) return ret; } -static int kv_dpm_hw_fini(struct amdgpu_device *adev) +static int kv_dpm_hw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (adev->pm.dpm_enabled) { mutex_lock(&adev->pm.mutex); kv_dpm_disable(adev); @@ -3091,8 +3095,10 @@ static int kv_dpm_hw_fini(struct amdgpu_device *adev) return 0; } -static int kv_dpm_suspend(struct amdgpu_device *adev) +static int kv_dpm_suspend(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (adev->pm.dpm_enabled) { mutex_lock(&adev->pm.mutex); /* disable dpm */ @@ -3104,9 +3110,10 @@ static int kv_dpm_suspend(struct amdgpu_device *adev) return 0; } -static int kv_dpm_resume(struct amdgpu_device *adev) +static int kv_dpm_resume(void *handle) { int ret; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; if (adev->pm.dpm_enabled) { /* asic init will reset to the boot state */ @@ -3124,20 +3131,20 @@ static int kv_dpm_resume(struct amdgpu_device *adev) return 0; } -static bool kv_dpm_is_idle(struct amdgpu_device *adev) +static bool kv_dpm_is_idle(void *handle) { - /* XXX */ return true; } -static int kv_dpm_wait_for_idle(struct amdgpu_device *adev) +static int kv_dpm_wait_for_idle(void *handle) { - /* XXX */ return 0; } -static void kv_dpm_print_status(struct amdgpu_device *adev) +static void kv_dpm_print_status(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + dev_info(adev->dev, "KV/KB DPM registers\n"); dev_info(adev->dev, " DIDT_SQ_CTRL0=0x%08X\n", RREG32_DIDT(ixDIDT_SQ_CTRL0)); @@ -3191,7 +3198,7 @@ static void kv_dpm_print_status(struct amdgpu_device *adev) RREG32(mmSMC_IND_ACCESS_CNTL)); } -static int kv_dpm_soft_reset(struct amdgpu_device *adev) +static int kv_dpm_soft_reset(void *handle) { return 0; } @@ -3274,19 +3281,19 @@ static int kv_dpm_process_interrupt(struct amdgpu_device *adev, return 0; } -static int kv_dpm_set_clockgating_state(struct amdgpu_device *adev, - enum amdgpu_clockgating_state state) +static int kv_dpm_set_clockgating_state(void *handle, + enum amd_clockgating_state state) { return 0; } -static int kv_dpm_set_powergating_state(struct amdgpu_device *adev, - enum amdgpu_powergating_state state) +static int kv_dpm_set_powergating_state(void *handle, + enum amd_powergating_state state) { return 0; } -const struct amdgpu_ip_funcs kv_dpm_ip_funcs = { +const struct amd_ip_funcs kv_dpm_ip_funcs = { .early_init = kv_dpm_early_init, .late_init = kv_dpm_late_init, .sw_init = kv_dpm_sw_init, diff --git 
a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index 99b8b326e999..64de8f60e3a5 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -955,8 +955,10 @@ static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring, SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */ } -static int sdma_v2_4_early_init(struct amdgpu_device *adev) +static int sdma_v2_4_early_init(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + sdma_v2_4_set_ring_funcs(adev); sdma_v2_4_set_buffer_funcs(adev); sdma_v2_4_set_vm_pte_funcs(adev); @@ -965,10 +967,11 @@ static int sdma_v2_4_early_init(struct amdgpu_device *adev) return 0; } -static int sdma_v2_4_sw_init(struct amdgpu_device *adev) +static int sdma_v2_4_sw_init(void *handle) { struct amdgpu_ring *ring; int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* SDMA trap event */ r = amdgpu_irq_add_id(adev, 224, &adev->sdma_trap_irq); @@ -1020,17 +1023,20 @@ static int sdma_v2_4_sw_init(struct amdgpu_device *adev) return r; } -static int sdma_v2_4_sw_fini(struct amdgpu_device *adev) +static int sdma_v2_4_sw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + amdgpu_ring_fini(&adev->sdma[0].ring); amdgpu_ring_fini(&adev->sdma[1].ring); return 0; } -static int sdma_v2_4_hw_init(struct amdgpu_device *adev) +static int sdma_v2_4_hw_init(void *handle) { int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; sdma_v2_4_init_golden_registers(adev); @@ -1041,27 +1047,32 @@ static int sdma_v2_4_hw_init(struct amdgpu_device *adev) return r; } -static int sdma_v2_4_hw_fini(struct amdgpu_device *adev) +static int sdma_v2_4_hw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + sdma_v2_4_enable(adev, false); return 0; } -static int sdma_v2_4_suspend(struct amdgpu_device *adev) +static int sdma_v2_4_suspend(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; return sdma_v2_4_hw_fini(adev); } -static int sdma_v2_4_resume(struct amdgpu_device *adev) +static int sdma_v2_4_resume(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; return sdma_v2_4_hw_init(adev); } -static bool sdma_v2_4_is_idle(struct amdgpu_device *adev) +static bool sdma_v2_4_is_idle(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 tmp = RREG32(mmSRBM_STATUS2); if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK | @@ -1071,10 +1082,11 @@ static bool sdma_v2_4_is_idle(struct amdgpu_device *adev) return true; } -static int sdma_v2_4_wait_for_idle(struct amdgpu_device *adev) +static int sdma_v2_4_wait_for_idle(void *handle) { unsigned i; u32 tmp; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; for (i = 0; i < adev->usec_timeout; i++) { tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK | @@ -1087,9 +1099,10 @@ static int sdma_v2_4_wait_for_idle(struct amdgpu_device *adev) return -ETIMEDOUT; } -static void sdma_v2_4_print_status(struct amdgpu_device *adev) +static void sdma_v2_4_print_status(void *handle) { int i, j; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; dev_info(adev->dev, "VI SDMA registers\n"); dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", @@ -1133,9 +1146,10 @@ static void sdma_v2_4_print_status(struct amdgpu_device *adev) } } -static int sdma_v2_4_soft_reset(struct amdgpu_device *adev) +static int sdma_v2_4_soft_reset(void *handle) { u32 srbm_soft_reset = 0; + 
struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 tmp = RREG32(mmSRBM_STATUS2); if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) { @@ -1154,7 +1168,7 @@ static int sdma_v2_4_soft_reset(struct amdgpu_device *adev) } if (srbm_soft_reset) { - sdma_v2_4_print_status(adev); + sdma_v2_4_print_status((void *)adev); tmp = RREG32(mmSRBM_SOFT_RESET); tmp |= srbm_soft_reset; @@ -1171,7 +1185,7 @@ static int sdma_v2_4_soft_reset(struct amdgpu_device *adev) /* Wait a little for things to settle down */ udelay(50); - sdma_v2_4_print_status(adev); + sdma_v2_4_print_status((void *)adev); } return 0; @@ -1272,21 +1286,20 @@ static int sdma_v2_4_process_illegal_inst_irq(struct amdgpu_device *adev, return 0; } -static int sdma_v2_4_set_clockgating_state(struct amdgpu_device *adev, - enum amdgpu_clockgating_state state) +static int sdma_v2_4_set_clockgating_state(void *handle, + enum amd_clockgating_state state) { /* XXX handled via the smc on VI */ - return 0; } -static int sdma_v2_4_set_powergating_state(struct amdgpu_device *adev, - enum amdgpu_powergating_state state) +static int sdma_v2_4_set_powergating_state(void *handle, + enum amd_powergating_state state) { return 0; } -const struct amdgpu_ip_funcs sdma_v2_4_ip_funcs = { +const struct amd_ip_funcs sdma_v2_4_ip_funcs = { .early_init = sdma_v2_4_early_init, .late_init = NULL, .sw_init = sdma_v2_4_sw_init, diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h index 6cdf8941c577..07349f5ee10f 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h @@ -24,6 +24,6 @@ #ifndef __SDMA_V2_4_H__ #define __SDMA_V2_4_H__ -extern const struct amdgpu_ip_funcs sdma_v2_4_ip_funcs; +extern const struct amd_ip_funcs sdma_v2_4_ip_funcs; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 86a5278b65f6..bf3cefc447ca 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -1018,8 +1018,10 @@ static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring, SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */ } -static int sdma_v3_0_early_init(struct amdgpu_device *adev) +static int sdma_v3_0_early_init(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + sdma_v3_0_set_ring_funcs(adev); sdma_v3_0_set_buffer_funcs(adev); sdma_v3_0_set_vm_pte_funcs(adev); @@ -1028,10 +1030,11 @@ static int sdma_v3_0_early_init(struct amdgpu_device *adev) return 0; } -static int sdma_v3_0_sw_init(struct amdgpu_device *adev) +static int sdma_v3_0_sw_init(void *handle) { struct amdgpu_ring *ring; int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* SDMA trap event */ r = amdgpu_irq_add_id(adev, 224, &adev->sdma_trap_irq); @@ -1085,17 +1088,20 @@ static int sdma_v3_0_sw_init(struct amdgpu_device *adev) return r; } -static int sdma_v3_0_sw_fini(struct amdgpu_device *adev) +static int sdma_v3_0_sw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + amdgpu_ring_fini(&adev->sdma[0].ring); amdgpu_ring_fini(&adev->sdma[1].ring); return 0; } -static int sdma_v3_0_hw_init(struct amdgpu_device *adev) +static int sdma_v3_0_hw_init(void *handle) { int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; sdma_v3_0_init_golden_registers(adev); @@ -1106,27 +1112,32 @@ static int sdma_v3_0_hw_init(struct amdgpu_device *adev) return r; } -static int sdma_v3_0_hw_fini(struct amdgpu_device *adev) +static int 
sdma_v3_0_hw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + sdma_v3_0_enable(adev, false); return 0; } -static int sdma_v3_0_suspend(struct amdgpu_device *adev) +static int sdma_v3_0_suspend(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; return sdma_v3_0_hw_fini(adev); } -static int sdma_v3_0_resume(struct amdgpu_device *adev) +static int sdma_v3_0_resume(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; return sdma_v3_0_hw_init(adev); } -static bool sdma_v3_0_is_idle(struct amdgpu_device *adev) +static bool sdma_v3_0_is_idle(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 tmp = RREG32(mmSRBM_STATUS2); if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK | @@ -1136,10 +1147,11 @@ static bool sdma_v3_0_is_idle(struct amdgpu_device *adev) return true; } -static int sdma_v3_0_wait_for_idle(struct amdgpu_device *adev) +static int sdma_v3_0_wait_for_idle(void *handle) { unsigned i; u32 tmp; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; for (i = 0; i < adev->usec_timeout; i++) { tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK | @@ -1152,9 +1164,10 @@ static int sdma_v3_0_wait_for_idle(struct amdgpu_device *adev) return -ETIMEDOUT; } -static void sdma_v3_0_print_status(struct amdgpu_device *adev) +static void sdma_v3_0_print_status(void *handle) { int i, j; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; dev_info(adev->dev, "VI SDMA registers\n"); dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", @@ -1200,9 +1213,10 @@ static void sdma_v3_0_print_status(struct amdgpu_device *adev) } } -static int sdma_v3_0_soft_reset(struct amdgpu_device *adev) +static int sdma_v3_0_soft_reset(void *handle) { u32 srbm_soft_reset = 0; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 tmp = RREG32(mmSRBM_STATUS2); if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) { @@ -1221,7 +1235,7 @@ static int sdma_v3_0_soft_reset(struct amdgpu_device *adev) } if (srbm_soft_reset) { - sdma_v3_0_print_status(adev); + sdma_v3_0_print_status((void *)adev); tmp = RREG32(mmSRBM_SOFT_RESET); tmp |= srbm_soft_reset; @@ -1238,7 +1252,7 @@ static int sdma_v3_0_soft_reset(struct amdgpu_device *adev) /* Wait a little for things to settle down */ udelay(50); - sdma_v3_0_print_status(adev); + sdma_v3_0_print_status((void *)adev); } return 0; @@ -1339,21 +1353,19 @@ static int sdma_v3_0_process_illegal_inst_irq(struct amdgpu_device *adev, return 0; } -static int sdma_v3_0_set_clockgating_state(struct amdgpu_device *adev, - enum amdgpu_clockgating_state state) +static int sdma_v3_0_set_clockgating_state(void *handle, + enum amd_clockgating_state state) { - /* XXX handled via the smc on VI */ - return 0; } -static int sdma_v3_0_set_powergating_state(struct amdgpu_device *adev, - enum amdgpu_powergating_state state) +static int sdma_v3_0_set_powergating_state(void *handle, + enum amd_powergating_state state) { return 0; } -const struct amdgpu_ip_funcs sdma_v3_0_ip_funcs = { +const struct amd_ip_funcs sdma_v3_0_ip_funcs = { .early_init = sdma_v3_0_early_init, .late_init = NULL, .sw_init = sdma_v3_0_sw_init, diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h index 85bf2ac59252..0cb9698a3054 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h @@ -24,6 +24,6 @@ #ifndef __SDMA_V3_0_H__ #define __SDMA_V3_0_H__ -extern const struct amdgpu_ip_funcs sdma_v3_0_ip_funcs; +extern const struct 
amd_ip_funcs sdma_v3_0_ip_funcs; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c index cf97d496a7a4..2d5b1bd52afa 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c @@ -30,8 +30,10 @@ MODULE_FIRMWARE("amdgpu/tonga_smc.bin"); static void tonga_dpm_set_funcs(struct amdgpu_device *adev); -static int tonga_dpm_early_init(struct amdgpu_device *adev) +static int tonga_dpm_early_init(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + tonga_dpm_set_funcs(adev); return 0; @@ -41,7 +43,6 @@ static int tonga_dpm_init_microcode(struct amdgpu_device *adev) { char fw_name[30] = "amdgpu/tonga_smc.bin"; int err; - err = request_firmware(&adev->pm.fw, fw_name, adev->dev); if (err) goto out; @@ -56,9 +57,10 @@ out: return err; } -static int tonga_dpm_sw_init(struct amdgpu_device *adev) +static int tonga_dpm_sw_init(void *handle) { int ret; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; ret = tonga_dpm_init_microcode(adev); if (ret) @@ -67,14 +69,15 @@ static int tonga_dpm_sw_init(struct amdgpu_device *adev) return 0; } -static int tonga_dpm_sw_fini(struct amdgpu_device *adev) +static int tonga_dpm_sw_fini(void *handle) { return 0; } -static int tonga_dpm_hw_init(struct amdgpu_device *adev) +static int tonga_dpm_hw_init(void *handle) { int ret; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; mutex_lock(&adev->pm.mutex); @@ -99,41 +102,47 @@ fail: return -EINVAL; } -static int tonga_dpm_hw_fini(struct amdgpu_device *adev) +static int tonga_dpm_hw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + mutex_lock(&adev->pm.mutex); tonga_smu_fini(adev); mutex_unlock(&adev->pm.mutex); return 0; } -static int tonga_dpm_suspend(struct amdgpu_device *adev) +static int tonga_dpm_suspend(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + tonga_dpm_hw_fini(adev); return 0; } -static int tonga_dpm_resume(struct amdgpu_device *adev) +static int tonga_dpm_resume(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + tonga_dpm_hw_init(adev); return 0; } -static int tonga_dpm_set_clockgating_state(struct amdgpu_device *adev, - enum amdgpu_clockgating_state state) +static int tonga_dpm_set_clockgating_state(void *handle, + enum amd_clockgating_state state) { return 0; } -static int tonga_dpm_set_powergating_state(struct amdgpu_device *adev, - enum amdgpu_powergating_state state) +static int tonga_dpm_set_powergating_state(void *handle, + enum amd_powergating_state state) { return 0; } -const struct amdgpu_ip_funcs tonga_dpm_ip_funcs = { +const struct amd_ip_funcs tonga_dpm_ip_funcs = { .early_init = tonga_dpm_early_init, .late_init = NULL, .sw_init = tonga_dpm_sw_init, diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c index cff1b8bce6a4..743c372837aa 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c @@ -270,15 +270,18 @@ static void tonga_ih_set_rptr(struct amdgpu_device *adev) } } -static int tonga_ih_early_init(struct amdgpu_device *adev) +static int tonga_ih_early_init(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + tonga_ih_set_interrupt_funcs(adev); return 0; } -static int tonga_ih_sw_init(struct amdgpu_device *adev) +static int tonga_ih_sw_init(void *handle) { int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; r = amdgpu_ih_ring_init(adev, 4 
* 1024, true); if (r) @@ -292,17 +295,20 @@ static int tonga_ih_sw_init(struct amdgpu_device *adev) return r; } -static int tonga_ih_sw_fini(struct amdgpu_device *adev) +static int tonga_ih_sw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + amdgpu_irq_fini(adev); amdgpu_ih_ring_fini(adev); return 0; } -static int tonga_ih_hw_init(struct amdgpu_device *adev) +static int tonga_ih_hw_init(void *handle) { int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; r = tonga_ih_irq_init(adev); if (r) @@ -311,25 +317,32 @@ static int tonga_ih_hw_init(struct amdgpu_device *adev) return 0; } -static int tonga_ih_hw_fini(struct amdgpu_device *adev) +static int tonga_ih_hw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + tonga_ih_irq_disable(adev); return 0; } -static int tonga_ih_suspend(struct amdgpu_device *adev) +static int tonga_ih_suspend(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + return tonga_ih_hw_fini(adev); } -static int tonga_ih_resume(struct amdgpu_device *adev) +static int tonga_ih_resume(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + return tonga_ih_hw_init(adev); } -static bool tonga_ih_is_idle(struct amdgpu_device *adev) +static bool tonga_ih_is_idle(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 tmp = RREG32(mmSRBM_STATUS); if (REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY)) @@ -338,10 +351,11 @@ static bool tonga_ih_is_idle(struct amdgpu_device *adev) return true; } -static int tonga_ih_wait_for_idle(struct amdgpu_device *adev) +static int tonga_ih_wait_for_idle(void *handle) { unsigned i; u32 tmp; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; for (i = 0; i < adev->usec_timeout; i++) { /* read MC_STATUS */ @@ -353,8 +367,10 @@ static int tonga_ih_wait_for_idle(struct amdgpu_device *adev) return -ETIMEDOUT; } -static void tonga_ih_print_status(struct amdgpu_device *adev) +static void tonga_ih_print_status(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + dev_info(adev->dev, "TONGA IH registers\n"); dev_info(adev->dev, " SRBM_STATUS=0x%08X\n", RREG32(mmSRBM_STATUS)); @@ -380,9 +396,10 @@ static void tonga_ih_print_status(struct amdgpu_device *adev) RREG32(mmIH_RB_WPTR)); } -static int tonga_ih_soft_reset(struct amdgpu_device *adev) +static int tonga_ih_soft_reset(void *handle) { u32 srbm_soft_reset = 0; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 tmp = RREG32(mmSRBM_STATUS); if (tmp & SRBM_STATUS__IH_BUSY_MASK) @@ -413,21 +430,19 @@ static int tonga_ih_soft_reset(struct amdgpu_device *adev) return 0; } -static int tonga_ih_set_clockgating_state(struct amdgpu_device *adev, - enum amdgpu_clockgating_state state) +static int tonga_ih_set_clockgating_state(void *handle, + enum amd_clockgating_state state) { - // TODO return 0; } -static int tonga_ih_set_powergating_state(struct amdgpu_device *adev, - enum amdgpu_powergating_state state) +static int tonga_ih_set_powergating_state(void *handle, + enum amd_powergating_state state) { - // TODO return 0; } -const struct amdgpu_ip_funcs tonga_ih_ip_funcs = { +const struct amd_ip_funcs tonga_ih_ip_funcs = { .early_init = tonga_ih_early_init, .late_init = NULL, .sw_init = tonga_ih_sw_init, diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.h b/drivers/gpu/drm/amd/amdgpu/tonga_ih.h index 7c9bae87124e..7392d70fa4a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.h +++ 
b/drivers/gpu/drm/amd/amdgpu/tonga_ih.h @@ -24,6 +24,6 @@ #ifndef __TONGA_IH_H__ #define __TONGA_IH_H__ -extern const struct amdgpu_ip_funcs tonga_ih_ip_funcs; +extern const struct amd_ip_funcs tonga_ih_ip_funcs; #endif /* __CZ_IH_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index 6e4cb604f928..292932a73c81 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -83,17 +83,20 @@ static void uvd_v4_2_ring_set_wptr(struct amdgpu_ring *ring) WREG32(mmUVD_RBC_RB_WPTR, ring->wptr); } -static int uvd_v4_2_early_init(struct amdgpu_device *adev) +static int uvd_v4_2_early_init(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + uvd_v4_2_set_ring_funcs(adev); uvd_v4_2_set_irq_funcs(adev); return 0; } -static int uvd_v4_2_sw_init(struct amdgpu_device *adev) +static int uvd_v4_2_sw_init(void *handle) { struct amdgpu_ring *ring; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; /* UVD TRAP */ @@ -117,9 +120,10 @@ static int uvd_v4_2_sw_init(struct amdgpu_device *adev) return r; } -static int uvd_v4_2_sw_fini(struct amdgpu_device *adev) +static int uvd_v4_2_sw_fini(void *handle) { int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; r = amdgpu_uvd_suspend(adev); if (r) @@ -139,8 +143,9 @@ static int uvd_v4_2_sw_fini(struct amdgpu_device *adev) * * Initialize the hardware, boot up the VCPU and do some testing */ -static int uvd_v4_2_hw_init(struct amdgpu_device *adev) +static int uvd_v4_2_hw_init(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_ring *ring = &adev->uvd.ring; uint32_t tmp; int r; @@ -203,8 +208,9 @@ done: * * Stop the UVD block, mark ring as not ready any more */ -static int uvd_v4_2_hw_fini(struct amdgpu_device *adev) +static int uvd_v4_2_hw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_ring *ring = &adev->uvd.ring; uvd_v4_2_stop(adev); @@ -213,9 +219,10 @@ static int uvd_v4_2_hw_fini(struct amdgpu_device *adev) return 0; } -static int uvd_v4_2_suspend(struct amdgpu_device *adev) +static int uvd_v4_2_suspend(void *handle) { int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; r = uvd_v4_2_hw_fini(adev); if (r) @@ -228,9 +235,10 @@ static int uvd_v4_2_suspend(struct amdgpu_device *adev) return r; } -static int uvd_v4_2_resume(struct amdgpu_device *adev) +static int uvd_v4_2_resume(void *handle) { int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; r = amdgpu_uvd_resume(adev); if (r) @@ -662,14 +670,17 @@ static void uvd_v4_2_init_cg(struct amdgpu_device *adev) } } -static bool uvd_v4_2_is_idle(struct amdgpu_device *adev) +static bool uvd_v4_2_is_idle(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK); } -static int uvd_v4_2_wait_for_idle(struct amdgpu_device *adev) +static int uvd_v4_2_wait_for_idle(void *handle) { unsigned i; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; for (i = 0; i < adev->usec_timeout; i++) { if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK)) @@ -678,8 +689,10 @@ static int uvd_v4_2_wait_for_idle(struct amdgpu_device *adev) return -ETIMEDOUT; } -static int uvd_v4_2_soft_reset(struct amdgpu_device *adev) +static int uvd_v4_2_soft_reset(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + uvd_v4_2_stop(adev); WREG32_P(mmSRBM_SOFT_RESET, 
SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK, @@ -689,8 +702,9 @@ static int uvd_v4_2_soft_reset(struct amdgpu_device *adev) return uvd_v4_2_start(adev); } -static void uvd_v4_2_print_status(struct amdgpu_device *adev) +static void uvd_v4_2_print_status(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; dev_info(adev->dev, "UVD 4.2 registers\n"); dev_info(adev->dev, " UVD_SEMA_ADDR_LOW=0x%08X\n", RREG32(mmUVD_SEMA_ADDR_LOW)); @@ -810,12 +824,13 @@ static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev, return 0; } -static int uvd_v4_2_set_clockgating_state(struct amdgpu_device *adev, - enum amdgpu_clockgating_state state) +static int uvd_v4_2_set_clockgating_state(void *handle, + enum amd_clockgating_state state) { bool gate = false; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (state == AMDGPU_CG_STATE_GATE) + if (state == AMD_CG_STATE_GATE) gate = true; uvd_v4_2_enable_mgcg(adev, gate); @@ -823,8 +838,8 @@ static int uvd_v4_2_set_clockgating_state(struct amdgpu_device *adev, return 0; } -static int uvd_v4_2_set_powergating_state(struct amdgpu_device *adev, - enum amdgpu_powergating_state state) +static int uvd_v4_2_set_powergating_state(void *handle, + enum amd_powergating_state state) { /* This doesn't actually powergate the UVD block. * That's done in the dpm code via the SMC. This @@ -833,7 +848,9 @@ static int uvd_v4_2_set_powergating_state(struct amdgpu_device *adev, * revisit this when there is a cleaner line between * the smc and the hw blocks */ - if (state == AMDGPU_PG_STATE_GATE) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (state == AMD_PG_STATE_GATE) { uvd_v4_2_stop(adev); return 0; } else { @@ -841,7 +858,7 @@ static int uvd_v4_2_set_powergating_state(struct amdgpu_device *adev, } } -const struct amdgpu_ip_funcs uvd_v4_2_ip_funcs = { +const struct amd_ip_funcs uvd_v4_2_ip_funcs = { .early_init = uvd_v4_2_early_init, .late_init = NULL, .sw_init = uvd_v4_2_sw_init, diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h index 323a6d828dfe..0a615dd50840 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h @@ -24,6 +24,6 @@ #ifndef __UVD_V4_2_H__ #define __UVD_V4_2_H__ -extern const struct amdgpu_ip_funcs uvd_v4_2_ip_funcs; +extern const struct amd_ip_funcs uvd_v4_2_ip_funcs; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index f3b3026d5932..004c56496fc4 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -79,17 +79,20 @@ static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring) WREG32(mmUVD_RBC_RB_WPTR, ring->wptr); } -static int uvd_v5_0_early_init(struct amdgpu_device *adev) +static int uvd_v5_0_early_init(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + uvd_v5_0_set_ring_funcs(adev); uvd_v5_0_set_irq_funcs(adev); return 0; } -static int uvd_v5_0_sw_init(struct amdgpu_device *adev) +static int uvd_v5_0_sw_init(void *handle) { struct amdgpu_ring *ring; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; /* UVD TRAP */ @@ -113,9 +116,10 @@ static int uvd_v5_0_sw_init(struct amdgpu_device *adev) return r; } -static int uvd_v5_0_sw_fini(struct amdgpu_device *adev) +static int uvd_v5_0_sw_fini(void *handle) { int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; r = amdgpu_uvd_suspend(adev); if (r) @@ -135,8 +139,9 @@ static int uvd_v5_0_sw_fini(struct 
amdgpu_device *adev) * * Initialize the hardware, boot up the VCPU and do some testing */ -static int uvd_v5_0_hw_init(struct amdgpu_device *adev) +static int uvd_v5_0_hw_init(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_ring *ring = &adev->uvd.ring; uint32_t tmp; int r; @@ -199,8 +204,9 @@ done: * * Stop the UVD block, mark ring as not ready any more */ -static int uvd_v5_0_hw_fini(struct amdgpu_device *adev) +static int uvd_v5_0_hw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_ring *ring = &adev->uvd.ring; uvd_v5_0_stop(adev); @@ -209,9 +215,10 @@ static int uvd_v5_0_hw_fini(struct amdgpu_device *adev) return 0; } -static int uvd_v5_0_suspend(struct amdgpu_device *adev) +static int uvd_v5_0_suspend(void *handle) { int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; r = uvd_v5_0_hw_fini(adev); if (r) @@ -224,9 +231,10 @@ static int uvd_v5_0_suspend(struct amdgpu_device *adev) return r; } -static int uvd_v5_0_resume(struct amdgpu_device *adev) +static int uvd_v5_0_resume(void *handle) { int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; r = amdgpu_uvd_resume(adev); if (r) @@ -605,14 +613,17 @@ error: return r; } -static bool uvd_v5_0_is_idle(struct amdgpu_device *adev) +static bool uvd_v5_0_is_idle(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK); } -static int uvd_v5_0_wait_for_idle(struct amdgpu_device *adev) +static int uvd_v5_0_wait_for_idle(void *handle) { unsigned i; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; for (i = 0; i < adev->usec_timeout; i++) { if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK)) @@ -621,8 +632,10 @@ static int uvd_v5_0_wait_for_idle(struct amdgpu_device *adev) return -ETIMEDOUT; } -static int uvd_v5_0_soft_reset(struct amdgpu_device *adev) +static int uvd_v5_0_soft_reset(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + uvd_v5_0_stop(adev); WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK, @@ -632,8 +645,9 @@ static int uvd_v5_0_soft_reset(struct amdgpu_device *adev) return uvd_v5_0_start(adev); } -static void uvd_v5_0_print_status(struct amdgpu_device *adev) +static void uvd_v5_0_print_status(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; dev_info(adev->dev, "UVD 5.0 registers\n"); dev_info(adev->dev, " UVD_SEMA_ADDR_LOW=0x%08X\n", RREG32(mmUVD_SEMA_ADDR_LOW)); @@ -757,16 +771,14 @@ static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev, return 0; } -static int uvd_v5_0_set_clockgating_state(struct amdgpu_device *adev, - enum amdgpu_clockgating_state state) +static int uvd_v5_0_set_clockgating_state(void *handle, + enum amd_clockgating_state state) { - //TODO - return 0; } -static int uvd_v5_0_set_powergating_state(struct amdgpu_device *adev, - enum amdgpu_powergating_state state) +static int uvd_v5_0_set_powergating_state(void *handle, + enum amd_powergating_state state) { /* This doesn't actually powergate the UVD block. * That's done in the dpm code via the SMC. 
This @@ -775,7 +787,9 @@ static int uvd_v5_0_set_powergating_state(struct amdgpu_device *adev, * revisit this when there is a cleaner line between * the smc and the hw blocks */ - if (state == AMDGPU_PG_STATE_GATE) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (state == AMD_PG_STATE_GATE) { uvd_v5_0_stop(adev); return 0; } else { @@ -783,7 +797,7 @@ static int uvd_v5_0_set_powergating_state(struct amdgpu_device *adev, } } -const struct amdgpu_ip_funcs uvd_v5_0_ip_funcs = { +const struct amd_ip_funcs uvd_v5_0_ip_funcs = { .early_init = uvd_v5_0_early_init, .late_init = NULL, .sw_init = uvd_v5_0_sw_init, diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h index 7d7a15296383..e3b3c49fa5de 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h @@ -24,6 +24,6 @@ #ifndef __UVD_V5_0_H__ #define __UVD_V5_0_H__ -extern const struct amdgpu_ip_funcs uvd_v5_0_ip_funcs; +extern const struct amd_ip_funcs uvd_v5_0_ip_funcs; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index f59942d5c50e..8c790fb31e2f 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -79,18 +79,21 @@ static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring) WREG32(mmUVD_RBC_RB_WPTR, ring->wptr); } -static int uvd_v6_0_early_init(struct amdgpu_device *adev) +static int uvd_v6_0_early_init(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + uvd_v6_0_set_ring_funcs(adev); uvd_v6_0_set_irq_funcs(adev); return 0; } -static int uvd_v6_0_sw_init(struct amdgpu_device *adev) +static int uvd_v6_0_sw_init(void *handle) { struct amdgpu_ring *ring; int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* UVD TRAP */ r = amdgpu_irq_add_id(adev, 124, &adev->uvd.irq); @@ -113,9 +116,10 @@ static int uvd_v6_0_sw_init(struct amdgpu_device *adev) return r; } -static int uvd_v6_0_sw_fini(struct amdgpu_device *adev) +static int uvd_v6_0_sw_fini(void *handle) { int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; r = amdgpu_uvd_suspend(adev); if (r) @@ -135,8 +139,9 @@ static int uvd_v6_0_sw_fini(struct amdgpu_device *adev) * * Initialize the hardware, boot up the VCPU and do some testing */ -static int uvd_v6_0_hw_init(struct amdgpu_device *adev) +static int uvd_v6_0_hw_init(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_ring *ring = &adev->uvd.ring; uint32_t tmp; int r; @@ -193,8 +198,9 @@ done: * * Stop the UVD block, mark ring as not ready any more */ -static int uvd_v6_0_hw_fini(struct amdgpu_device *adev) +static int uvd_v6_0_hw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_ring *ring = &adev->uvd.ring; uvd_v6_0_stop(adev); @@ -203,9 +209,10 @@ static int uvd_v6_0_hw_fini(struct amdgpu_device *adev) return 0; } -static int uvd_v6_0_suspend(struct amdgpu_device *adev) +static int uvd_v6_0_suspend(void *handle) { int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; r = uvd_v6_0_hw_fini(adev); if (r) @@ -218,9 +225,10 @@ static int uvd_v6_0_suspend(struct amdgpu_device *adev) return r; } -static int uvd_v6_0_resume(struct amdgpu_device *adev) +static int uvd_v6_0_resume(void *handle) { int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; r = amdgpu_uvd_resume(adev); if (r) @@ -593,14 +601,17 @@ error: return r; } -static bool uvd_v6_0_is_idle(struct 
amdgpu_device *adev) +static bool uvd_v6_0_is_idle(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK); } -static int uvd_v6_0_wait_for_idle(struct amdgpu_device *adev) +static int uvd_v6_0_wait_for_idle(void *handle) { unsigned i; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; for (i = 0; i < adev->usec_timeout; i++) { if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK)) @@ -609,8 +620,10 @@ static int uvd_v6_0_wait_for_idle(struct amdgpu_device *adev) return -ETIMEDOUT; } -static int uvd_v6_0_soft_reset(struct amdgpu_device *adev) +static int uvd_v6_0_soft_reset(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + uvd_v6_0_stop(adev); WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK, @@ -620,8 +633,9 @@ static int uvd_v6_0_soft_reset(struct amdgpu_device *adev) return uvd_v6_0_start(adev); } -static void uvd_v6_0_print_status(struct amdgpu_device *adev) +static void uvd_v6_0_print_status(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; dev_info(adev->dev, "UVD 6.0 registers\n"); dev_info(adev->dev, " UVD_SEMA_ADDR_LOW=0x%08X\n", RREG32(mmUVD_SEMA_ADDR_LOW)); @@ -737,16 +751,14 @@ static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev, return 0; } -static int uvd_v6_0_set_clockgating_state(struct amdgpu_device *adev, - enum amdgpu_clockgating_state state) +static int uvd_v6_0_set_clockgating_state(void *handle, + enum amd_clockgating_state state) { - //TODO - return 0; } -static int uvd_v6_0_set_powergating_state(struct amdgpu_device *adev, - enum amdgpu_powergating_state state) +static int uvd_v6_0_set_powergating_state(void *handle, + enum amd_powergating_state state) { /* This doesn't actually powergate the UVD block. * That's done in the dpm code via the SMC. 
This @@ -755,7 +767,9 @@ static int uvd_v6_0_set_powergating_state(struct amdgpu_device *adev, * revisit this when there is a cleaner line between * the smc and the hw blocks */ - if (state == AMDGPU_PG_STATE_GATE) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (state == AMD_PG_STATE_GATE) { uvd_v6_0_stop(adev); return 0; } else { @@ -763,7 +777,7 @@ static int uvd_v6_0_set_powergating_state(struct amdgpu_device *adev, } } -const struct amdgpu_ip_funcs uvd_v6_0_ip_funcs = { +const struct amd_ip_funcs uvd_v6_0_ip_funcs = { .early_init = uvd_v6_0_early_init, .late_init = NULL, .sw_init = uvd_v6_0_sw_init, diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h index bc21afc8abac..6b92a2352986 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h @@ -24,6 +24,6 @@ #ifndef __UVD_V6_0_H__ #define __UVD_V6_0_H__ -extern const struct amdgpu_ip_funcs uvd_v6_0_ip_funcs; +extern const struct amd_ip_funcs uvd_v6_0_ip_funcs; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c index f200df3cf97a..303d961d57bd 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c @@ -169,18 +169,21 @@ static int vce_v2_0_start(struct amdgpu_device *adev) return 0; } -static int vce_v2_0_early_init(struct amdgpu_device *adev) +static int vce_v2_0_early_init(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + vce_v2_0_set_ring_funcs(adev); vce_v2_0_set_irq_funcs(adev); return 0; } -static int vce_v2_0_sw_init(struct amdgpu_device *adev) +static int vce_v2_0_sw_init(void *handle) { struct amdgpu_ring *ring; int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* VCE */ r = amdgpu_irq_add_id(adev, 167, &adev->vce.irq); @@ -213,9 +216,10 @@ static int vce_v2_0_sw_init(struct amdgpu_device *adev) return r; } -static int vce_v2_0_sw_fini(struct amdgpu_device *adev) +static int vce_v2_0_sw_fini(void *handle) { int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; r = amdgpu_vce_suspend(adev); if (r) @@ -228,10 +232,11 @@ static int vce_v2_0_sw_fini(struct amdgpu_device *adev) return r; } -static int vce_v2_0_hw_init(struct amdgpu_device *adev) +static int vce_v2_0_hw_init(void *handle) { struct amdgpu_ring *ring; int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; r = vce_v2_0_start(adev); if (r) @@ -258,15 +263,15 @@ static int vce_v2_0_hw_init(struct amdgpu_device *adev) return 0; } -static int vce_v2_0_hw_fini(struct amdgpu_device *adev) +static int vce_v2_0_hw_fini(void *handle) { - // TODO return 0; } -static int vce_v2_0_suspend(struct amdgpu_device *adev) +static int vce_v2_0_suspend(void *handle) { int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; r = vce_v2_0_hw_fini(adev); if (r) @@ -279,9 +284,10 @@ static int vce_v2_0_suspend(struct amdgpu_device *adev) return r; } -static int vce_v2_0_resume(struct amdgpu_device *adev) +static int vce_v2_0_resume(void *handle) { int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; r = amdgpu_vce_resume(adev); if (r) @@ -442,14 +448,17 @@ static void vce_v2_0_mc_resume(struct amdgpu_device *adev) vce_v2_0_init_cg(adev); } -static bool vce_v2_0_is_idle(struct amdgpu_device *adev) +static bool vce_v2_0_is_idle(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK); } -static int 
vce_v2_0_wait_for_idle(struct amdgpu_device *adev) +static int vce_v2_0_wait_for_idle(void *handle) { unsigned i; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; for (i = 0; i < adev->usec_timeout; i++) { if (!(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK)) @@ -458,8 +467,10 @@ static int vce_v2_0_wait_for_idle(struct amdgpu_device *adev) return -ETIMEDOUT; } -static int vce_v2_0_soft_reset(struct amdgpu_device *adev) +static int vce_v2_0_soft_reset(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK, ~SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK); mdelay(5); @@ -467,8 +478,10 @@ static int vce_v2_0_soft_reset(struct amdgpu_device *adev) return vce_v2_0_start(adev); } -static void vce_v2_0_print_status(struct amdgpu_device *adev) +static void vce_v2_0_print_status(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + dev_info(adev->dev, "VCE 2.0 registers\n"); dev_info(adev->dev, " VCE_STATUS=0x%08X\n", RREG32(mmVCE_STATUS)); @@ -569,12 +582,13 @@ static int vce_v2_0_process_interrupt(struct amdgpu_device *adev, return 0; } -static int vce_v2_0_set_clockgating_state(struct amdgpu_device *adev, - enum amdgpu_clockgating_state state) +static int vce_v2_0_set_clockgating_state(void *handle, + enum amd_clockgating_state state) { bool gate = false; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (state == AMDGPU_CG_STATE_GATE) + if (state == AMD_CG_STATE_GATE) gate = true; vce_v2_0_enable_mgcg(adev, gate); @@ -582,8 +596,8 @@ static int vce_v2_0_set_clockgating_state(struct amdgpu_device *adev, return 0; } -static int vce_v2_0_set_powergating_state(struct amdgpu_device *adev, - enum amdgpu_powergating_state state) +static int vce_v2_0_set_powergating_state(void *handle, + enum amd_powergating_state state) { /* This doesn't actually powergate the VCE block. * That's done in the dpm code via the SMC. This @@ -592,14 +606,16 @@ static int vce_v2_0_set_powergating_state(struct amdgpu_device *adev, * revisit this when there is a cleaner line between * the smc and the hw blocks */ - if (state == AMDGPU_PG_STATE_GATE) + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (state == AMD_PG_STATE_GATE) /* XXX do we need a vce_v2_0_stop()? 
*/ return 0; else return vce_v2_0_start(adev); } -const struct amdgpu_ip_funcs vce_v2_0_ip_funcs = { +const struct amd_ip_funcs vce_v2_0_ip_funcs = { .early_init = vce_v2_0_early_init, .late_init = NULL, .sw_init = vce_v2_0_sw_init, diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.h b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.h index 8eb1cf227ea6..0d2ae8a01acd 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.h +++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.h @@ -24,6 +24,6 @@ #ifndef __VCE_V2_0_H__ #define __VCE_V2_0_H__ -extern const struct amdgpu_ip_funcs vce_v2_0_ip_funcs; +extern const struct amd_ip_funcs vce_v2_0_ip_funcs; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 57e0e167c83b..d62c4002e39c 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -190,16 +190,19 @@ static int vce_v3_0_start(struct amdgpu_device *adev) return 0; } -static int vce_v3_0_early_init(struct amdgpu_device *adev) +static int vce_v3_0_early_init(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + vce_v3_0_set_ring_funcs(adev); vce_v3_0_set_irq_funcs(adev); return 0; } -static int vce_v3_0_sw_init(struct amdgpu_device *adev) +static int vce_v3_0_sw_init(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_ring *ring; int r; @@ -234,9 +237,10 @@ static int vce_v3_0_sw_init(struct amdgpu_device *adev) return r; } -static int vce_v3_0_sw_fini(struct amdgpu_device *adev) +static int vce_v3_0_sw_fini(void *handle) { int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; r = amdgpu_vce_suspend(adev); if (r) @@ -249,10 +253,11 @@ static int vce_v3_0_sw_fini(struct amdgpu_device *adev) return r; } -static int vce_v3_0_hw_init(struct amdgpu_device *adev) +static int vce_v3_0_hw_init(void *handle) { struct amdgpu_ring *ring; int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; r = vce_v3_0_start(adev); if (r) @@ -279,15 +284,15 @@ static int vce_v3_0_hw_init(struct amdgpu_device *adev) return 0; } -static int vce_v3_0_hw_fini(struct amdgpu_device *adev) +static int vce_v3_0_hw_fini(void *handle) { - // TODO return 0; } -static int vce_v3_0_suspend(struct amdgpu_device *adev) +static int vce_v3_0_suspend(void *handle) { int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; r = vce_v3_0_hw_fini(adev); if (r) @@ -300,9 +305,10 @@ static int vce_v3_0_suspend(struct amdgpu_device *adev) return r; } -static int vce_v3_0_resume(struct amdgpu_device *adev) +static int vce_v3_0_resume(void *handle) { int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; r = amdgpu_vce_resume(adev); if (r) @@ -362,14 +368,17 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx) ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK); } -static bool vce_v3_0_is_idle(struct amdgpu_device *adev) +static bool vce_v3_0_is_idle(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK); } -static int vce_v3_0_wait_for_idle(struct amdgpu_device *adev) +static int vce_v3_0_wait_for_idle(void *handle) { unsigned i; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; for (i = 0; i < adev->usec_timeout; i++) { if (!(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK)) @@ -378,8 +387,10 @@ static int vce_v3_0_wait_for_idle(struct amdgpu_device *adev) return -ETIMEDOUT; } -static int vce_v3_0_soft_reset(struct amdgpu_device 
*adev) +static int vce_v3_0_soft_reset(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK, ~SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK); mdelay(5); @@ -387,8 +398,10 @@ static int vce_v3_0_soft_reset(struct amdgpu_device *adev) return vce_v3_0_start(adev); } -static void vce_v3_0_print_status(struct amdgpu_device *adev) +static void vce_v3_0_print_status(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + dev_info(adev->dev, "VCE 3.0 registers\n"); dev_info(adev->dev, " VCE_STATUS=0x%08X\n", RREG32(mmVCE_STATUS)); @@ -487,15 +500,14 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev, return 0; } -static int vce_v3_0_set_clockgating_state(struct amdgpu_device *adev, - enum amdgpu_clockgating_state state) +static int vce_v3_0_set_clockgating_state(void *handle, + enum amd_clockgating_state state) { - //TODO return 0; } -static int vce_v3_0_set_powergating_state(struct amdgpu_device *adev, - enum amdgpu_powergating_state state) +static int vce_v3_0_set_powergating_state(void *handle, + enum amd_powergating_state state) { /* This doesn't actually powergate the VCE block. * That's done in the dpm code via the SMC. This @@ -504,14 +516,16 @@ static int vce_v3_0_set_powergating_state(struct amdgpu_device *adev, * revisit this when there is a cleaner line between * the smc and the hw blocks */ - if (state == AMDGPU_PG_STATE_GATE) + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (state == AMD_PG_STATE_GATE) /* XXX do we need a vce_v3_0_stop()? */ return 0; else return vce_v3_0_start(adev); } -const struct amdgpu_ip_funcs vce_v3_0_ip_funcs = { +const struct amd_ip_funcs vce_v3_0_ip_funcs = { .early_init = vce_v3_0_early_init, .late_init = NULL, .sw_init = vce_v3_0_sw_init, diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h index f3c2ba92a1f1..b45af65da81f 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h @@ -24,6 +24,6 @@ #ifndef __VCE_V3_0_H__ #define __VCE_V3_0_H__ -extern const struct amdgpu_ip_funcs vce_v3_0_ip_funcs; +extern const struct amd_ip_funcs vce_v3_0_ip_funcs; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 59a073aa42a4..be7c17610696 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -994,42 +994,42 @@ static const struct amdgpu_ip_block_version topaz_ip_blocks[] = { /* ORDER MATTERS! */ { - .type = AMDGPU_IP_BLOCK_TYPE_COMMON, + .type = AMD_IP_BLOCK_TYPE_COMMON, .major = 2, .minor = 0, .rev = 0, .funcs = &vi_common_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_GMC, + .type = AMD_IP_BLOCK_TYPE_GMC, .major = 8, .minor = 0, .rev = 0, .funcs = &gmc_v8_0_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_IH, + .type = AMD_IP_BLOCK_TYPE_IH, .major = 2, .minor = 4, .rev = 0, .funcs = &iceland_ih_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_SMC, + .type = AMD_IP_BLOCK_TYPE_SMC, .major = 7, .minor = 1, .rev = 0, .funcs = &iceland_dpm_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_GFX, + .type = AMD_IP_BLOCK_TYPE_GFX, .major = 8, .minor = 0, .rev = 0, .funcs = &gfx_v8_0_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_SDMA, + .type = AMD_IP_BLOCK_TYPE_SDMA, .major = 2, .minor = 4, .rev = 0, @@ -1041,63 +1041,63 @@ static const struct amdgpu_ip_block_version tonga_ip_blocks[] = { /* ORDER MATTERS! 
*/ { - .type = AMDGPU_IP_BLOCK_TYPE_COMMON, + .type = AMD_IP_BLOCK_TYPE_COMMON, .major = 2, .minor = 0, .rev = 0, .funcs = &vi_common_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_GMC, + .type = AMD_IP_BLOCK_TYPE_GMC, .major = 8, .minor = 0, .rev = 0, .funcs = &gmc_v8_0_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_IH, + .type = AMD_IP_BLOCK_TYPE_IH, .major = 3, .minor = 0, .rev = 0, .funcs = &tonga_ih_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_SMC, + .type = AMD_IP_BLOCK_TYPE_SMC, .major = 7, .minor = 1, .rev = 0, .funcs = &tonga_dpm_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_DCE, + .type = AMD_IP_BLOCK_TYPE_DCE, .major = 10, .minor = 0, .rev = 0, .funcs = &dce_v10_0_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_GFX, + .type = AMD_IP_BLOCK_TYPE_GFX, .major = 8, .minor = 0, .rev = 0, .funcs = &gfx_v8_0_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_SDMA, + .type = AMD_IP_BLOCK_TYPE_SDMA, .major = 3, .minor = 0, .rev = 0, .funcs = &sdma_v3_0_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_UVD, + .type = AMD_IP_BLOCK_TYPE_UVD, .major = 5, .minor = 0, .rev = 0, .funcs = &uvd_v5_0_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_VCE, + .type = AMD_IP_BLOCK_TYPE_VCE, .major = 3, .minor = 0, .rev = 0, @@ -1109,63 +1109,63 @@ static const struct amdgpu_ip_block_version cz_ip_blocks[] = { /* ORDER MATTERS! */ { - .type = AMDGPU_IP_BLOCK_TYPE_COMMON, + .type = AMD_IP_BLOCK_TYPE_COMMON, .major = 2, .minor = 0, .rev = 0, .funcs = &vi_common_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_GMC, + .type = AMD_IP_BLOCK_TYPE_GMC, .major = 8, .minor = 0, .rev = 0, .funcs = &gmc_v8_0_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_IH, + .type = AMD_IP_BLOCK_TYPE_IH, .major = 3, .minor = 0, .rev = 0, .funcs = &cz_ih_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_SMC, + .type = AMD_IP_BLOCK_TYPE_SMC, .major = 8, .minor = 0, .rev = 0, .funcs = &cz_dpm_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_DCE, + .type = AMD_IP_BLOCK_TYPE_DCE, .major = 11, .minor = 0, .rev = 0, .funcs = &dce_v11_0_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_GFX, + .type = AMD_IP_BLOCK_TYPE_GFX, .major = 8, .minor = 0, .rev = 0, .funcs = &gfx_v8_0_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_SDMA, + .type = AMD_IP_BLOCK_TYPE_SDMA, .major = 3, .minor = 0, .rev = 0, .funcs = &sdma_v3_0_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_UVD, + .type = AMD_IP_BLOCK_TYPE_UVD, .major = 6, .minor = 0, .rev = 0, .funcs = &uvd_v6_0_ip_funcs, }, { - .type = AMDGPU_IP_BLOCK_TYPE_VCE, + .type = AMD_IP_BLOCK_TYPE_VCE, .major = 3, .minor = 0, .rev = 0, @@ -1225,9 +1225,10 @@ static const struct amdgpu_asic_funcs vi_asic_funcs = .wait_for_mc_idle = &gmc_v8_0_mc_wait_for_idle, }; -static int vi_common_early_init(struct amdgpu_device *adev) +static int vi_common_early_init(void *handle) { bool smc_enabled = false; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; adev->smc_rreg = &vi_smc_rreg; adev->smc_wreg = &vi_smc_wreg; @@ -1240,8 +1241,8 @@ static int vi_common_early_init(struct amdgpu_device *adev) adev->asic_funcs = &vi_asic_funcs; - if (amdgpu_get_ip_block(adev, AMDGPU_IP_BLOCK_TYPE_SMC) && - (amdgpu_ip_block_mask & (1 << AMDGPU_IP_BLOCK_TYPE_SMC))) + if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC) && + (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_SMC))) smc_enabled = true; adev->rev_id = vi_get_rev_id(adev); @@ -1279,18 +1280,20 @@ static int vi_common_early_init(struct amdgpu_device *adev) return 0; } -static int vi_common_sw_init(struct amdgpu_device *adev) +static int vi_common_sw_init(void *handle) { return 0; } -static int 
vi_common_sw_fini(struct amdgpu_device *adev) +static int vi_common_sw_fini(void *handle) { return 0; } -static int vi_common_hw_init(struct amdgpu_device *adev) +static int vi_common_hw_init(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* move the golden regs per IP block */ vi_init_golden_registers(adev); /* enable pcie gen2/3 link */ @@ -1303,58 +1306,63 @@ static int vi_common_hw_init(struct amdgpu_device *adev) return 0; } -static int vi_common_hw_fini(struct amdgpu_device *adev) +static int vi_common_hw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* enable the doorbell aperture */ vi_enable_doorbell_aperture(adev, false); return 0; } -static int vi_common_suspend(struct amdgpu_device *adev) +static int vi_common_suspend(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + return vi_common_hw_fini(adev); } -static int vi_common_resume(struct amdgpu_device *adev) +static int vi_common_resume(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + return vi_common_hw_init(adev); } -static bool vi_common_is_idle(struct amdgpu_device *adev) +static bool vi_common_is_idle(void *handle) { return true; } -static int vi_common_wait_for_idle(struct amdgpu_device *adev) +static int vi_common_wait_for_idle(void *handle) { return 0; } -static void vi_common_print_status(struct amdgpu_device *adev) +static void vi_common_print_status(void *handle) { - + return; } -static int vi_common_soft_reset(struct amdgpu_device *adev) +static int vi_common_soft_reset(void *handle) { - /* XXX hard reset?? */ return 0; } -static int vi_common_set_clockgating_state(struct amdgpu_device *adev, - enum amdgpu_clockgating_state state) +static int vi_common_set_clockgating_state(void *handle, + enum amd_clockgating_state state) { return 0; } -static int vi_common_set_powergating_state(struct amdgpu_device *adev, - enum amdgpu_powergating_state state) +static int vi_common_set_powergating_state(void *handle, + enum amd_powergating_state state) { return 0; } -const struct amdgpu_ip_funcs vi_common_ip_funcs = { +const struct amd_ip_funcs vi_common_ip_funcs = { .early_init = vi_common_early_init, .late_init = NULL, .sw_init = vi_common_sw_init, diff --git a/drivers/gpu/drm/amd/amdgpu/vi.h b/drivers/gpu/drm/amd/amdgpu/vi.h index d16a5f7e4edd..502094042462 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.h +++ b/drivers/gpu/drm/amd/amdgpu/vi.h @@ -24,7 +24,7 @@ #ifndef __VI_H__ #define __VI_H__ -extern const struct amdgpu_ip_funcs vi_common_ip_funcs; +extern const struct amd_ip_funcs vi_common_ip_funcs; void vi_srbm_select(struct amdgpu_device *adev, u32 me, u32 pipe, u32 queue, u32 vmid); diff --git a/drivers/gpu/drm/amd/amdgpu/vi_dpm.h b/drivers/gpu/drm/amd/amdgpu/vi_dpm.h index 11cb1f7eeba5..3b45332f5df4 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi_dpm.h +++ b/drivers/gpu/drm/amd/amdgpu/vi_dpm.h @@ -24,13 +24,13 @@ #ifndef __VI_DPM_H__ #define __VI_DPM_H__ -extern const struct amdgpu_ip_funcs cz_dpm_ip_funcs; +extern const struct amd_ip_funcs cz_dpm_ip_funcs; int cz_smu_init(struct amdgpu_device *adev); int cz_smu_start(struct amdgpu_device *adev); int cz_smu_fini(struct amdgpu_device *adev); -extern const struct amdgpu_ip_funcs tonga_dpm_ip_funcs; +extern const struct amd_ip_funcs tonga_dpm_ip_funcs; -extern const struct amdgpu_ip_funcs iceland_dpm_ip_funcs; +extern const struct amd_ip_funcs iceland_dpm_ip_funcs; #endif diff --git a/drivers/gpu/drm/amd/include/amd_shared.h 
b/drivers/gpu/drm/amd/include/amd_shared.h new file mode 100644 index 000000000000..5bdf1b4397a0 --- /dev/null +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -0,0 +1,81 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef __AMD_SHARED_H__ +#define __AMD_SHARED_H__ + +enum amd_ip_block_type { + AMD_IP_BLOCK_TYPE_COMMON, + AMD_IP_BLOCK_TYPE_GMC, + AMD_IP_BLOCK_TYPE_IH, + AMD_IP_BLOCK_TYPE_SMC, + AMD_IP_BLOCK_TYPE_DCE, + AMD_IP_BLOCK_TYPE_GFX, + AMD_IP_BLOCK_TYPE_SDMA, + AMD_IP_BLOCK_TYPE_UVD, + AMD_IP_BLOCK_TYPE_VCE, +}; + +enum amd_clockgating_state { + AMD_CG_STATE_GATE = 0, + AMD_CG_STATE_UNGATE, +}; + +enum amd_powergating_state { + AMD_PG_STATE_GATE = 0, + AMD_PG_STATE_UNGATE, +}; + +struct amd_ip_funcs { + /* sets up early driver state (pre sw_init), does not configure hw - Optional */ + int (*early_init)(void *handle); + /* sets up late driver/hw state (post hw_init) - Optional */ + int (*late_init)(void *handle); + /* sets up driver state, does not configure hw */ + int (*sw_init)(void *handle); + /* tears down driver state, does not configure hw */ + int (*sw_fini)(void *handle); + /* sets up the hw state */ + int (*hw_init)(void *handle); + /* tears down the hw state */ + int (*hw_fini)(void *handle); + /* handles IP specific hw/sw changes for suspend */ + int (*suspend)(void *handle); + /* handles IP specific hw/sw changes for resume */ + int (*resume)(void *handle); + /* returns current IP block idle status */ + bool (*is_idle)(void *handle); + /* poll for idle */ + int (*wait_for_idle)(void *handle); + /* soft reset the IP block */ + int (*soft_reset)(void *handle); + /* dump the IP block status registers */ + void (*print_status)(void *handle); + /* enable/disable cg for the IP block */ + int (*set_clockgating_state)(void *handle, + enum amd_clockgating_state state); + /* enable/disable pg for the IP block */ + int (*set_powergating_state)(void *handle, + enum amd_powergating_state state); +}; + +#endif /* __AMD_SHARED_H__ */ -- cgit v1.2.3 From 8dacc127fcb3b60e6893ec74ee2f615d78dcbc4e Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 11 May 2015 16:20:58 -0400 Subject: drm/amdgpu: fix description of vm_size module parameter (v2) default is 8GB, not 4GB. 
v2: fix fallback setting when the user provides an invalid input Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 6 +++--- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index d9d26f2e7bd7..175dc67130c3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -968,13 +968,13 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev) if (!amdgpu_check_pot_argument(amdgpu_vm_size)) { dev_warn(adev->dev, "VM size (%d) must be a power of 2\n", amdgpu_vm_size); - amdgpu_vm_size = 4; + amdgpu_vm_size = 8; } if (amdgpu_vm_size < 1) { dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n", amdgpu_vm_size); - amdgpu_vm_size = 4; + amdgpu_vm_size = 8; } /* @@ -983,7 +983,7 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev) if (amdgpu_vm_size > 1024) { dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n", amdgpu_vm_size); - amdgpu_vm_size = 4; + amdgpu_vm_size = 8; } /* defines number of bits in page table versus page directory, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 6edacceb6cfd..56da962231fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -128,7 +128,7 @@ module_param_named(bapm, amdgpu_bapm, int, 0444); MODULE_PARM_DESC(deep_color, "Deep Color support (1 = enable, 0 = disable (default))"); module_param_named(deep_color, amdgpu_deep_color, int, 0444); -MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 4GB)"); +MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 8GB)"); module_param_named(vm_size, amdgpu_vm_size, int, 0444); MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default depending on vm_size)"); -- cgit v1.2.3 From e176fe176d3a02d9409e0f36502799083ae13e1b Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 27 May 2015 10:22:47 +0200 Subject: drm/amdgpu: remove mclk_lock MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Not needed any more. 
Signed-off-by: Christian König Reviewed-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 -- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 - drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 2 -- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 2 -- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 40 +++--------------------------- 5 files changed, 4 insertions(+), 43 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 6c8c24ba463d..4300e3d4b1cd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1558,8 +1558,6 @@ struct amdgpu_dpm { struct amdgpu_pm { struct mutex mutex; - /* write locked while reprogramming mclk */ - struct rw_semaphore mclk_lock; u32 current_sclk; u32 current_mclk; u32 default_sclk; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 175dc67130c3..36be03ce76c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1401,7 +1401,6 @@ int amdgpu_device_init(struct amdgpu_device *adev, mutex_init(&adev->gfx.gpu_clock_mutex); mutex_init(&adev->srbm_mutex); mutex_init(&adev->grbm_idx_mutex); - init_rwsem(&adev->pm.mclk_lock); init_rwsem(&adev->exclusive_lock); mutex_init(&adev->mn_lock); hash_init(adev->mn_hash); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 62cabfb5dff8..7d801e016e31 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -272,11 +272,9 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, bo->flags = flags; amdgpu_fill_placement_to_bo(bo, placement); /* Kernel allocation are uninterruptible */ - down_read(&adev->pm.mclk_lock); r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type, &bo->placement, page_align, !kernel, NULL, acc_size, sg, NULL, &amdgpu_ttm_bo_destroy); - up_read(&adev->pm.mclk_lock); if (unlikely(r != 0)) { return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 89782543f854..605a9e42f943 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -581,7 +581,6 @@ force: } mutex_lock(&adev->ddev->struct_mutex); - down_write(&adev->pm.mclk_lock); mutex_lock(&adev->ring_lock); /* update whether vce is active */ @@ -629,7 +628,6 @@ force: done: mutex_unlock(&adev->ring_lock); - up_write(&adev->pm.mclk_lock); mutex_unlock(&adev->ddev->struct_mutex); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 729e0bb3070f..d3706a498293 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -966,52 +966,20 @@ void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size) man->size = size >> PAGE_SHIFT; } -static struct vm_operations_struct amdgpu_ttm_vm_ops; -static const struct vm_operations_struct *ttm_vm_ops = NULL; - -static int amdgpu_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) -{ - struct ttm_buffer_object *bo; - struct amdgpu_device *adev; - int r; - - bo = (struct ttm_buffer_object *)vma->vm_private_data; - if (bo == NULL) { - return VM_FAULT_NOPAGE; - } - adev = amdgpu_get_adev(bo->bdev); - down_read(&adev->pm.mclk_lock); - r = ttm_vm_ops->fault(vma, vmf); - up_read(&adev->pm.mclk_lock); - return r; -} - int amdgpu_mmap(struct file *filp, struct vm_area_struct 
*vma) { struct drm_file *file_priv; struct amdgpu_device *adev; int r; - if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) { + if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) return -EINVAL; - } file_priv = filp->private_data; adev = file_priv->minor->dev->dev_private; - if (adev == NULL) { + if (adev == NULL) return -EINVAL; - } - r = ttm_bo_mmap(filp, vma, &adev->mman.bdev); - if (unlikely(r != 0)) { - return r; - } - if (unlikely(ttm_vm_ops == NULL)) { - ttm_vm_ops = vma->vm_ops; - amdgpu_ttm_vm_ops = *ttm_vm_ops; - amdgpu_ttm_vm_ops.fault = &amdgpu_ttm_fault; - } - vma->vm_ops = &amdgpu_ttm_vm_ops; - return 0; + + return ttm_bo_mmap(filp, vma, &adev->mman.bdev); } int amdgpu_copy_buffer(struct amdgpu_ring *ring, -- cgit v1.2.3 From 3e39ab90833b20eb7fbc4e472726000274739538 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 5 Jun 2015 15:04:33 -0400 Subject: drm/amdgpu: also print the pci revision when printing the pci ids MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The driver makes use of this information so print it to aid in debugging. Reviewed-by: Christian König Reviewed-by: Samuel Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 36be03ce76c2..fec487d1c870 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1388,9 +1388,9 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg; adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg; - DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n", - amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device, - pdev->subsystem_vendor, pdev->subsystem_device); + DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n", + amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device, + pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision); /* mutex initialization are all done here so we * can recall function without having locking issues */ -- cgit v1.2.3
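
Taken together, the conversions above replace the driver-private struct amdgpu_ip_funcs with the shared struct amd_ip_funcs declared in amd_shared.h: every callback now takes an opaque void *handle instead of a struct amdgpu_device *, and each implementation casts the handle back to the device that owns the block. The sketch below is not part of the patches; it is a minimal illustration of that convention under the assumption of a made-up IP block, and the foo_* names are hypothetical. Real blocks such as tonga_ih, uvd_v6_0, vce_v3_0 and vi_common follow the same shape.

/* Hypothetical IP block implementing the amd_ip_funcs handle convention. */
#include "amdgpu.h"
#include "amd_shared.h"

static int foo_early_init(void *handle)
{
	/* every hook recovers the owning device from the opaque handle */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	dev_info(adev->dev, "foo: early init\n");
	return 0;
}

static bool foo_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* a real block would poll a status register here via RREG32() */
	return adev != NULL;
}

static int foo_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_PG_STATE_GATE)
		dev_info(adev->dev, "foo: gated\n");
	return 0;
}

const struct amd_ip_funcs foo_ip_funcs = {
	.early_init = foo_early_init,
	.late_init = NULL,
	.is_idle = foo_is_idle,
	.set_powergating_state = foo_set_powergating_state,
	/* sw_init/sw_fini, hw_init/hw_fini, suspend/resume, etc. follow the same pattern */
};

Keeping the handle opaque means amd_shared.h carries no amdgpu-specific types, which is presumably why the header was added under drivers/gpu/drm/amd/include rather than in the amdgpu directory itself.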