Diffstat (limited to 'drivers')
-rw-r--r--  drivers/base/soc.c                            2
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.c          19
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.h           2
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_ttm.c           5
-rw-r--r--  drivers/gpu/drm/drm_edid.c                   11
-rw-r--r--  drivers/gpu/drm/i810/i810_dma.c               4
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h               3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c       61
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.c        19
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h         3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_prime.c      45
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c          382
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h          11
-rw-r--r--  drivers/gpu/drm/radeon/ni.c                 360
-rw-r--r--  drivers/gpu/drm/radeon/nid.h                 11
-rw-r--r--  drivers/gpu/drm/radeon/r600.c               199
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h                2
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h               8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c           31
-rw-r--r--  drivers/gpu/drm/radeon/radeon_prime.c        44
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c              274
-rw-r--r--  drivers/gpu/drm/radeon/rv770d.h               4
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c                  1
-rw-r--r--  drivers/gpu/drm/udl/udl_fb.c                 13
-rw-r--r--  drivers/gpu/drm/udl/udl_gem.c                25
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c           2
-rw-r--r--  drivers/message/fusion/mptbase.c             12
-rw-r--r--  drivers/message/fusion/mptctl.c               4
-rw-r--r--  drivers/rapidio/Kconfig                      14
-rw-r--r--  drivers/rapidio/devices/Makefile              3
-rw-r--r--  drivers/rapidio/devices/tsi721.c            211
-rw-r--r--  drivers/rapidio/devices/tsi721.h            105
-rw-r--r--  drivers/rapidio/devices/tsi721_dma.c        823
-rw-r--r--  drivers/rapidio/rio.c                        81
34 files changed, 1665 insertions(+), 1129 deletions(-)
diff --git a/drivers/base/soc.c b/drivers/base/soc.c
index ba29b2e73d48..72b5e7280d14 100644
--- a/drivers/base/soc.c
+++ b/drivers/base/soc.c
@@ -42,7 +42,7 @@ struct device *soc_device_to_device(struct soc_device *soc_dev)
return &soc_dev->dev;
}
-static mode_t soc_attribute_mode(struct kobject *kobj,
+static umode_t soc_attribute_mode(struct kobject *kobj,
struct attribute *attr,
int index)
{
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index d7038230b71e..7053140c6596 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -35,9 +35,28 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
{0,}
};
+
+static void cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
+{
+ struct apertures_struct *ap;
+ bool primary = false;
+
+ ap = alloc_apertures(1);
+ ap->ranges[0].base = pci_resource_start(pdev, 0);
+ ap->ranges[0].size = pci_resource_len(pdev, 0);
+
+#ifdef CONFIG_X86
+ primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+#endif
+ remove_conflicting_framebuffers(ap, "cirrusdrmfb", primary);
+ kfree(ap);
+}
+
static int __devinit
cirrus_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+ cirrus_kick_out_firmware_fb(pdev);
+
return drm_get_pci_dev(pdev, ent, &driver);
}
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index 21bdfa8836f7..64ea597cb6d3 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -145,7 +145,7 @@ struct cirrus_device {
struct ttm_bo_device bdev;
atomic_t validate_sequence;
} ttm;
-
+ bool mm_inited;
};
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 2ebcd11a5023..50e170f879de 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -275,12 +275,17 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
pci_resource_len(dev->pdev, 0),
DRM_MTRR_WC);
+ cirrus->mm_inited = true;
return 0;
}
void cirrus_mm_fini(struct cirrus_device *cirrus)
{
struct drm_device *dev = cirrus->dev;
+
+ if (!cirrus->mm_inited)
+ return;
+
ttm_bo_device_release(&cirrus->ttm.bdev);
cirrus_ttm_global_release(cirrus);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index c3b5139eba7f..eb92fe257a39 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -30,7 +30,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/i2c.h>
-#include <linux/export.h>
+#include <linux/module.h>
#include "drmP.h"
#include "drm_edid.h"
#include "drm_edid_modes.h"
@@ -149,6 +149,10 @@ int drm_edid_header_is_valid(const u8 *raw_edid)
}
EXPORT_SYMBOL(drm_edid_header_is_valid);
+static int edid_fixup __read_mostly = 6;
+module_param_named(edid_fixup, edid_fixup, int, 0400);
+MODULE_PARM_DESC(edid_fixup,
+ "Minimum number of valid EDID header bytes (0-8, default 6)");
/*
* Sanity check the EDID block (base or extension). Return 0 if the block
@@ -160,10 +164,13 @@ bool drm_edid_block_valid(u8 *raw_edid, int block)
u8 csum = 0;
struct edid *edid = (struct edid *)raw_edid;
+ if (edid_fixup > 8 || edid_fixup < 0)
+ edid_fixup = 6;
+
if (block == 0) {
int score = drm_edid_header_is_valid(raw_edid);
if (score == 8) ;
- else if (score >= 6) {
+ else if (score >= edid_fixup) {
DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
memcpy(raw_edid, edid_header, sizeof(edid_header));
} else {
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index f920fb5e42b6..fa9439159ebd 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -130,11 +130,10 @@ static int i810_map_buffer(struct drm_buf *buf, struct drm_file *file_priv)
return -EINVAL;
/* This is all entirely broken */
- down_write(&current->mm->mmap_sem);
old_fops = file_priv->filp->f_op;
file_priv->filp->f_op = &i810_buffer_fops;
dev_priv->mmap_buffer = buf;
- buf_priv->virtual = (void *)do_mmap(file_priv->filp, 0, buf->total,
+ buf_priv->virtual = (void *)vm_mmap(file_priv->filp, 0, buf->total,
PROT_READ | PROT_WRITE,
MAP_SHARED, buf->bus_address);
dev_priv->mmap_buffer = NULL;
@@ -145,7 +144,6 @@ static int i810_map_buffer(struct drm_buf *buf, struct drm_file *file_priv)
retcode = PTR_ERR(buf_priv->virtual);
buf_priv->virtual = NULL;
}
- up_write(&current->mm->mmap_sem);
return retcode;
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 377c21f531e4..c9cfc67c2cf5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -942,6 +942,9 @@ struct drm_i915_gem_object {
/* prime dma-buf support */
struct sg_table *sg_table;
+ void *dma_buf_vmapping;
+ int vmapping_count;
+
/**
* Used for performing relocations during execbuffer insertion.
*/
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 8e269178d6a5..aa308e1337db 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -74,6 +74,59 @@ static void i915_gem_dmabuf_release(struct dma_buf *dma_buf)
}
}
+static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+{
+ struct drm_i915_gem_object *obj = dma_buf->priv;
+ struct drm_device *dev = obj->base.dev;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (obj->dma_buf_vmapping) {
+ obj->vmapping_count++;
+ goto out_unlock;
+ }
+
+ if (!obj->pages) {
+ ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN);
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ return ERR_PTR(ret);
+ }
+ }
+
+ obj->dma_buf_vmapping = vmap(obj->pages, obj->base.size / PAGE_SIZE, 0, PAGE_KERNEL);
+ if (!obj->dma_buf_vmapping) {
+ DRM_ERROR("failed to vmap object\n");
+ goto out_unlock;
+ }
+
+ obj->vmapping_count = 1;
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return obj->dma_buf_vmapping;
+}
+
+static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+ struct drm_i915_gem_object *obj = dma_buf->priv;
+ struct drm_device *dev = obj->base.dev;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return;
+
+ --obj->vmapping_count;
+ if (obj->vmapping_count == 0) {
+ vunmap(obj->dma_buf_vmapping);
+ obj->dma_buf_vmapping = NULL;
+ }
+ mutex_unlock(&dev->struct_mutex);
+}
+
static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
return NULL;
@@ -93,6 +146,11 @@ static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_n
}
+static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+ return -EINVAL;
+}
+
static const struct dma_buf_ops i915_dmabuf_ops = {
.map_dma_buf = i915_gem_map_dma_buf,
.unmap_dma_buf = i915_gem_unmap_dma_buf,
@@ -101,6 +159,9 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
.kunmap = i915_gem_dmabuf_kunmap,
.kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
+ .mmap = i915_gem_dmabuf_mmap,
+ .vmap = i915_gem_dmabuf_vmap,
+ .vunmap = i915_gem_dmabuf_vunmap,
};
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 3c8e04f54713..93e832d6c328 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -41,9 +41,28 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
MODULE_DEVICE_TABLE(pci, pciidlist);
+static void mgag200_kick_out_firmware_fb(struct pci_dev *pdev)
+{
+ struct apertures_struct *ap;
+ bool primary = false;
+
+ ap = alloc_apertures(1);
+ ap->ranges[0].base = pci_resource_start(pdev, 0);
+ ap->ranges[0].size = pci_resource_len(pdev, 0);
+
+#ifdef CONFIG_X86
+ primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+#endif
+ remove_conflicting_framebuffers(ap, "mgag200drmfb", primary);
+ kfree(ap);
+}
+
+
static int __devinit
mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+ mgag200_kick_out_firmware_fb(pdev);
+
return drm_get_pci_dev(pdev, ent, &driver);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 634d222c93de..8613cb23808c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -123,6 +123,9 @@ struct nouveau_bo {
struct drm_gem_object *gem;
int pin_refcnt;
+
+ struct ttm_bo_kmap_obj dma_buf_vmap;
+ int vmapping_count;
};
#define nouveau_bo_tile_layout(nvbo) \
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index c58aab7370c5..a89240e5fb29 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -61,6 +61,48 @@ static void nouveau_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num,
}
+static int nouveau_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+ return -EINVAL;
+}
+
+static void *nouveau_gem_prime_vmap(struct dma_buf *dma_buf)
+{
+ struct nouveau_bo *nvbo = dma_buf->priv;
+ struct drm_device *dev = nvbo->gem->dev;
+ int ret;
+
+ mutex_lock(&dev->struct_mutex);
+ if (nvbo->vmapping_count) {
+ nvbo->vmapping_count++;
+ goto out_unlock;
+ }
+
+ ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.num_pages,
+ &nvbo->dma_buf_vmap);
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ return ERR_PTR(ret);
+ }
+ nvbo->vmapping_count = 1;
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return nvbo->dma_buf_vmap.virtual;
+}
+
+static void nouveau_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+ struct nouveau_bo *nvbo = dma_buf->priv;
+ struct drm_device *dev = nvbo->gem->dev;
+
+ mutex_lock(&dev->struct_mutex);
+ nvbo->vmapping_count--;
+ if (nvbo->vmapping_count == 0) {
+ ttm_bo_kunmap(&nvbo->dma_buf_vmap);
+ }
+ mutex_unlock(&dev->struct_mutex);
+}
+
static const struct dma_buf_ops nouveau_dmabuf_ops = {
.map_dma_buf = nouveau_gem_map_dma_buf,
.unmap_dma_buf = nouveau_gem_unmap_dma_buf,
@@ -69,6 +111,9 @@ static const struct dma_buf_ops nouveau_dmabuf_ops = {
.kmap_atomic = nouveau_gem_kmap_atomic,
.kunmap = nouveau_gem_kunmap,
.kunmap_atomic = nouveau_gem_kunmap_atomic,
+ .mmap = nouveau_gem_prime_mmap,
+ .vmap = nouveau_gem_prime_vmap,
+ .vunmap = nouveau_gem_prime_vunmap,
};
static int
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 58991af90502..01550d05e273 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1029,6 +1029,11 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+ if ((rdev->family == CHIP_JUNIPER) ||
+ (rdev->family == CHIP_CYPRESS) ||
+ (rdev->family == CHIP_HEMLOCK) ||
+ (rdev->family == CHIP_BARTS))
+ WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
}
WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
@@ -1553,163 +1558,10 @@ int evergreen_cp_resume(struct radeon_device *rdev)
/*
* Core functions
*/
-static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
- u32 num_tile_pipes,
- u32 num_backends,
- u32 backend_disable_mask)
-{
- u32 backend_map = 0;
- u32 enabled_backends_mask = 0;
- u32 enabled_backends_count = 0;
- u32 cur_pipe;
- u32 swizzle_pipe[EVERGREEN_MAX_PIPES];
- u32 cur_backend = 0;
- u32 i;
- bool force_no_swizzle;
-
- if (num_tile_pipes > EVERGREEN_MAX_PIPES)
- num_tile_pipes = EVERGREEN_MAX_PIPES;
- if (num_tile_pipes < 1)
- num_tile_pipes = 1;
- if (num_backends > EVERGREEN_MAX_BACKENDS)
- num_backends = EVERGREEN_MAX_BACKENDS;
- if (num_backends < 1)
- num_backends = 1;
-
- for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
- if (((backend_disable_mask >> i) & 1) == 0) {
- enabled_backends_mask |= (1 << i);
- ++enabled_backends_count;
- }
- if (enabled_backends_count == num_backends)
- break;
- }
-
- if (enabled_backends_count == 0) {
- enabled_backends_mask = 1;
- enabled_backends_count = 1;
- }
-
- if (enabled_backends_count != num_backends)
- num_backends = enabled_backends_count;
-
- memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * EVERGREEN_MAX_PIPES);
- switch (rdev->family) {
- case CHIP_CEDAR:
- case CHIP_REDWOOD:
- case CHIP_PALM:
- case CHIP_SUMO:
- case CHIP_SUMO2:
- case CHIP_TURKS:
- case CHIP_CAICOS:
- force_no_swizzle = false;
- break;
- case CHIP_CYPRESS:
- case CHIP_HEMLOCK:
- case CHIP_JUNIPER:
- case CHIP_BARTS:
- default:
- force_no_swizzle = true;
- break;
- }
- if (force_no_swizzle) {
- bool last_backend_enabled = false;
-
- force_no_swizzle = false;
- for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
- if (((enabled_backends_mask >> i) & 1) == 1) {
- if (last_backend_enabled)
- force_no_swizzle = true;
- last_backend_enabled = true;
- } else
- last_backend_enabled = false;
- }
- }
-
- switch (num_tile_pipes) {
- case 1:
- case 3:
- case 5:
- case 7:
- DRM_ERROR("odd number of pipes!\n");
- break;
- case 2:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- break;
- case 4:
- if (force_no_swizzle) {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- } else {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 1;
- swizzle_pipe[3] = 3;
- }
- break;
- case 6:
- if (force_no_swizzle) {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- swizzle_pipe[4] = 4;
- swizzle_pipe[5] = 5;
- } else {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 1;
- swizzle_pipe[4] = 3;
- swizzle_pipe[5] = 5;
- }
- break;
- case 8:
- if (force_no_swizzle) {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- swizzle_pipe[4] = 4;
- swizzle_pipe[5] = 5;
- swizzle_pipe[6] = 6;
- swizzle_pipe[7] = 7;
- } else {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 6;
- swizzle_pipe[4] = 1;
- swizzle_pipe[5] = 3;
- swizzle_pipe[6] = 5;
- swizzle_pipe[7] = 7;
- }
- break;
- }
-
- for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
- while (((1 << cur_backend) & enabled_backends_mask) == 0)
- cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
-
- backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
-
- cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
- }
-
- return backend_map;
-}
-
static void evergreen_gpu_init(struct radeon_device *rdev)
{
- u32 cc_rb_backend_disable = 0;
- u32 cc_gc_shader_pipe_config;
- u32 gb_addr_config = 0;
+ u32 gb_addr_config;
u32 mc_shared_chmap, mc_arb_ramcfg;
- u32 gb_backend_map;
- u32 grbm_gfx_index;
u32 sx_debug_1;
u32 smx_dc_ctl0;
u32 sq_config;
@@ -1724,6 +1576,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
u32 sq_stack_resource_mgmt_3;
u32 vgt_cache_invalidation;
u32 hdp_host_path_cntl, tmp;
+ u32 disabled_rb_mask;
int i, j, num_shader_engines, ps_thread_count;
switch (rdev->family) {
@@ -1748,6 +1601,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x100;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = CYPRESS_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_JUNIPER:
rdev->config.evergreen.num_ses = 1;
@@ -1769,6 +1623,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x100;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = JUNIPER_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_REDWOOD:
rdev->config.evergreen.num_ses = 1;
@@ -1790,6 +1645,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x100;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_CEDAR:
default:
@@ -1812,6 +1668,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_PALM:
rdev->config.evergreen.num_ses = 1;
@@ -1833,6 +1690,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_SUMO:
rdev->config.evergreen.num_ses = 1;
@@ -1860,6 +1718,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_SUMO2:
rdev->config.evergreen.num_ses = 1;
@@ -1881,6 +1740,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_BARTS:
rdev->config.evergreen.num_ses = 2;
@@ -1902,6 +1762,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x100;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = BARTS_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_TURKS:
rdev->config.evergreen.num_ses = 1;
@@ -1923,6 +1784,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x100;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = TURKS_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_CAICOS:
rdev->config.evergreen.num_ses = 1;
@@ -1944,6 +1806,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = CAICOS_GB_ADDR_CONFIG_GOLDEN;
break;
}
@@ -1960,20 +1823,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
evergreen_fix_pci_max_read_req_size(rdev);
- cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2;
-
- cc_gc_shader_pipe_config |=
- INACTIVE_QD_PIPES((EVERGREEN_MAX_PIPES_MASK << rdev->config.evergreen.max_pipes)
- & EVERGREEN_MAX_PIPES_MASK);
- cc_gc_shader_pipe_config |=
- INACTIVE_SIMDS((EVERGREEN_MAX_SIMDS_MASK << rdev->config.evergreen.max_simds)
- & EVERGREEN_MAX_SIMDS_MASK);
-
- cc_rb_backend_disable =
- BACKEND_DISABLE((EVERGREEN_MAX_BACKENDS_MASK << rdev->config.evergreen.max_backends)
- & EVERGREEN_MAX_BACKENDS_MASK);
-
-
mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
if ((rdev->family == CHIP_PALM) ||
(rdev->family == CHIP_SUMO) ||
@@ -1982,134 +1831,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
else
mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
- switch (rdev->config.evergreen.max_tile_pipes) {
- case 1:
- default:
- gb_addr_config |= NUM_PIPES(0);
- break;
- case 2:
- gb_addr_config |= NUM_PIPES(1);
- break;
- case 4:
- gb_addr_config |= NUM_PIPES(2);
- break;
- case 8:
- gb_addr_config |= NUM_PIPES(3);
- break;
- }
-
- gb_addr_config |= PIPE_INTERLEAVE_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
- gb_addr_config |= BANK_INTERLEAVE_SIZE(0);
- gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.evergreen.num_ses - 1);
- gb_addr_config |= SHADER_ENGINE_TILE_SIZE(1);
- gb_addr_config |= NUM_GPUS(0); /* Hemlock? */
- gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
-
- if (((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) > 2)
- gb_addr_config |= ROW_SIZE(2);
- else
- gb_addr_config |= ROW_SIZE((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT);
-
- if (rdev->ddev->pdev->device == 0x689e) {
- u32 efuse_straps_4;
- u32 efuse_straps_3;
- u8 efuse_box_bit_131_124;
-
- WREG32(RCU_IND_INDEX, 0x204);
- efuse_straps_4 = RREG32(RCU_IND_DATA);
- WREG32(RCU_IND_INDEX, 0x203);
- efuse_straps_3 = RREG32(RCU_IND_DATA);
- efuse_box_bit_131_124 = (u8)(((efuse_straps_4 & 0xf) << 4) | ((efuse_straps_3 & 0xf0000000) >> 28));
-
- switch(efuse_box_bit_131_124) {
- case 0x00:
- gb_backend_map = 0x76543210;
- break;
- case 0x55:
- gb_backend_map = 0x77553311;
- break;
- case 0x56:
- gb_backend_map = 0x77553300;
- break;
- case 0x59:
- gb_backend_map = 0x77552211;
- break;
- case 0x66:
- gb_backend_map = 0x77443300;
- break;
- case 0x99:
- gb_backend_map = 0x66552211;
- break;
- case 0x5a:
- gb_backend_map = 0x77552200;
- break;
- case 0xaa:
- gb_backend_map = 0x66442200;
- break;
- case 0x95:
- gb_backend_map = 0x66553311;
- break;
- default:
- DRM_ERROR("bad backend map, using default\n");
- gb_backend_map =
- evergreen_get_tile_pipe_to_backend_map(rdev,
- rdev->config.evergreen.max_tile_pipes,
- rdev->config.evergreen.max_backends,
- ((EVERGREEN_MAX_BACKENDS_MASK <<
- rdev->config.evergreen.max_backends) &
- EVERGREEN_MAX_BACKENDS_MASK));
- break;
- }
- } else if (rdev->ddev->pdev->device == 0x68b9) {
- u32 efuse_straps_3;
- u8 efuse_box_bit_127_124;
-
- WREG32(RCU_IND_INDEX, 0x203);
- efuse_straps_3 = RREG32(RCU_IND_DATA);
- efuse_box_bit_127_124 = (u8)((efuse_straps_3 & 0xF0000000) >> 28);
-
- switch(efuse_box_bit_127_124) {
- case 0x0:
- gb_backend_map = 0x00003210;
- break;
- case 0x5:
- case 0x6:
- case 0x9:
- case 0xa:
- gb_backend_map = 0x00003311;
- break;
- default:
- DRM_ERROR("bad backend map, using default\n");
- gb_backend_map =
- evergreen_get_tile_pipe_to_backend_map(rdev,
- rdev->config.evergreen.max_tile_pipes,
- rdev->config.evergreen.max_backends,
- ((EVERGREEN_MAX_BACKENDS_MASK <<
- rdev->config.evergreen.max_backends) &
- EVERGREEN_MAX_BACKENDS_MASK));
- break;
- }
- } else {
- switch (rdev->family) {
- case CHIP_CYPRESS:
- case CHIP_HEMLOCK:
- case CHIP_BARTS:
- gb_backend_map = 0x66442200;
- break;
- case CHIP_JUNIPER:
- gb_backend_map = 0x00002200;
- break;
- default:
- gb_backend_map =
- evergreen_get_tile_pipe_to_backend_map(rdev,
- rdev->config.evergreen.max_tile_pipes,
- rdev->config.evergreen.max_backends,
- ((EVERGREEN_MAX_BACKENDS_MASK <<
- rdev->config.evergreen.max_backends) &
- EVERGREEN_MAX_BACKENDS_MASK));
- }
- }
-
/* setup tiling info dword. gb_addr_config is not adequate since it does
* not have bank info, so create a custom tiling dword.
* bits 3:0 num_pipes
@@ -2136,45 +1857,54 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
if (rdev->flags & RADEON_IS_IGP)
rdev->config.evergreen.tile_config |= 1 << 4;
- else
- rdev->config.evergreen.tile_config |=
- ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
- rdev->config.evergreen.tile_config |=
- ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) << 8;
+ else {
+ if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
+ rdev->config.evergreen.tile_config |= 1 << 4;
+ else
+ rdev->config.evergreen.tile_config |= 0 << 4;
+ }
+ rdev->config.evergreen.tile_config |= 0 << 8;
rdev->config.evergreen.tile_config |=
((gb_addr_config & 0x30000000) >> 28) << 12;
- rdev->config.evergreen.backend_map = gb_backend_map;
- WREG32(GB_BACKEND_MAP, gb_backend_map);
- WREG32(GB_ADDR_CONFIG, gb_addr_config);
- WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
- WREG32(HDP_ADDR_CONFIG, gb_addr_config);
-
- num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
- grbm_gfx_index = INSTANCE_BROADCAST_WRITES;
+ num_shader_engines = (gb_addr_config & NUM_SHADER_ENGINES(3) >> 12) + 1;
- for (i = 0; i < rdev->config.evergreen.num_ses; i++) {
- u32 rb = cc_rb_backend_disable | (0xf0 << 16);
- u32 sp = cc_gc_shader_pipe_config;
- u32 gfx = grbm_gfx_index | SE_INDEX(i);
+ if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) {
+ u32 efuse_straps_4;
+ u32 efuse_straps_3;
- if (i == num_shader_engines) {
- rb |= BACKEND_DISABLE(EVERGREEN_MAX_BACKENDS_MASK);
- sp |= INACTIVE_SIMDS(EVERGREEN_MAX_SIMDS_MASK);
+ WREG32(RCU_IND_INDEX, 0x204);
+ efuse_straps_4 = RREG32(RCU_IND_DATA);
+ WREG32(RCU_IND_INDEX, 0x203);
+ efuse_straps_3 = RREG32(RCU_IND_DATA);
+ tmp = (((efuse_straps_4 & 0xf) << 4) |
+ ((efuse_straps_3 & 0xf0000000) >> 28));
+ } else {
+ tmp = 0;
+ for (i = (rdev->config.evergreen.num_ses - 1); i >= 0; i--) {
+ u32 rb_disable_bitmap;
+
+ WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+ WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+ rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
+ tmp <<= 4;
+ tmp |= rb_disable_bitmap;
}
+ }
+ /* enabled rb are just the one not disabled :) */
+ disabled_rb_mask = tmp;
- WREG32(GRBM_GFX_INDEX, gfx);
- WREG32(RLC_GFX_INDEX, gfx);
+ WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
+ WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
- WREG32(CC_RB_BACKEND_DISABLE, rb);
- WREG32(CC_SYS_RB_BACKEND_DISABLE, rb);
- WREG32(GC_USER_RB_BACKEND_DISABLE, rb);
- WREG32(CC_GC_SHADER_PIPE_CONFIG, sp);
- }
+ WREG32(GB_ADDR_CONFIG, gb_addr_config);
+ WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+ WREG32(HDP_ADDR_CONFIG, gb_addr_config);
- grbm_gfx_index |= SE_BROADCAST_WRITES;
- WREG32(GRBM_GFX_INDEX, grbm_gfx_index);
- WREG32(RLC_GFX_INDEX, grbm_gfx_index);
+ tmp = gb_addr_config & NUM_PIPES_MASK;
+ tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
+ EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
+ WREG32(GB_BACKEND_MAP, tmp);
WREG32(CGTS_SYS_TCC_DISABLE, 0);
WREG32(CGTS_TCC_DISABLE, 0);
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 79130bfd1d6f..2773039b4902 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -37,6 +37,15 @@
#define EVERGREEN_MAX_PIPES_MASK 0xFF
#define EVERGREEN_MAX_LDS_NUM 0xFFFF
+#define CYPRESS_GB_ADDR_CONFIG_GOLDEN 0x02011003
+#define BARTS_GB_ADDR_CONFIG_GOLDEN 0x02011003
+#define CAYMAN_GB_ADDR_CONFIG_GOLDEN 0x02011003
+#define JUNIPER_GB_ADDR_CONFIG_GOLDEN 0x02010002
+#define REDWOOD_GB_ADDR_CONFIG_GOLDEN 0x02010002
+#define TURKS_GB_ADDR_CONFIG_GOLDEN 0x02010002
+#define CEDAR_GB_ADDR_CONFIG_GOLDEN 0x02010001
+#define CAICOS_GB_ADDR_CONFIG_GOLDEN 0x02010001
+
/* Registers */
#define RCU_IND_INDEX 0x100
@@ -54,6 +63,7 @@
#define BACKEND_DISABLE(x) ((x) << 16)
#define GB_ADDR_CONFIG 0x98F8
#define NUM_PIPES(x) ((x) << 0)
+#define NUM_PIPES_MASK 0x0000000f
#define PIPE_INTERLEAVE_SIZE(x) ((x) << 4)
#define BANK_INTERLEAVE_SIZE(x) ((x) << 8)
#define NUM_SHADER_ENGINES(x) ((x) << 12)
@@ -452,6 +462,7 @@
#define MC_VM_MD_L1_TLB0_CNTL 0x2654
#define MC_VM_MD_L1_TLB1_CNTL 0x2658
#define MC_VM_MD_L1_TLB2_CNTL 0x265C
+#define MC_VM_MD_L1_TLB3_CNTL 0x2698
#define FUS_MC_VM_MD_L1_TLB0_CNTL 0x265C
#define FUS_MC_VM_MD_L1_TLB1_CNTL 0x2660
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index ce4e7cc6c905..3df4efa11942 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -417,215 +417,17 @@ out:
/*
* Core functions
*/
-static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
- u32 num_tile_pipes,
- u32 num_backends_per_asic,
- u32 *backend_disable_mask_per_asic,
- u32 num_shader_engines)
-{
- u32 backend_map = 0;
- u32 enabled_backends_mask = 0;
- u32 enabled_backends_count = 0;
- u32 num_backends_per_se;
- u32 cur_pipe;
- u32 swizzle_pipe[CAYMAN_MAX_PIPES];
- u32 cur_backend = 0;
- u32 i;
- bool force_no_swizzle;
-
- /* force legal values */
- if (num_tile_pipes < 1)
- num_tile_pipes = 1;
- if (num_tile_pipes > rdev->config.cayman.max_tile_pipes)
- num_tile_pipes = rdev->config.cayman.max_tile_pipes;
- if (num_shader_engines < 1)
- num_shader_engines = 1;
- if (num_shader_engines > rdev->config.cayman.max_shader_engines)
- num_shader_engines = rdev->config.cayman.max_shader_engines;
- if (num_backends_per_asic < num_shader_engines)
- num_backends_per_asic = num_shader_engines;
- if (num_backends_per_asic > (rdev->config.cayman.max_backends_per_se * num_shader_engines))
- num_backends_per_asic = rdev->config.cayman.max_backends_per_se * num_shader_engines;
-
- /* make sure we have the same number of backends per se */
- num_backends_per_asic = ALIGN(num_backends_per_asic, num_shader_engines);
- /* set up the number of backends per se */
- num_backends_per_se = num_backends_per_asic / num_shader_engines;
- if (num_backends_per_se > rdev->config.cayman.max_backends_per_se) {
- num_backends_per_se = rdev->config.cayman.max_backends_per_se;
- num_backends_per_asic = num_backends_per_se * num_shader_engines;
- }
-
- /* create enable mask and count for enabled backends */
- for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
- if (((*backend_disable_mask_per_asic >> i) & 1) == 0) {
- enabled_backends_mask |= (1 << i);
- ++enabled_backends_count;
- }
- if (enabled_backends_count == num_backends_per_asic)
- break;
- }
-
- /* force the backends mask to match the current number of backends */
- if (enabled_backends_count != num_backends_per_asic) {
- u32 this_backend_enabled;
- u32 shader_engine;
- u32 backend_per_se;
-
- enabled_backends_mask = 0;
- enabled_backends_count = 0;
- *backend_disable_mask_per_asic = CAYMAN_MAX_BACKENDS_MASK;
- for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
- /* calc the current se */
- shader_engine = i / rdev->config.cayman.max_backends_per_se;
- /* calc the backend per se */
- backend_per_se = i % rdev->config.cayman.max_backends_per_se;
- /* default to not enabled */
- this_backend_enabled = 0;
- if ((shader_engine < num_shader_engines) &&
- (backend_per_se < num_backends_per_se))
- this_backend_enabled = 1;
- if (this_backend_enabled) {
- enabled_backends_mask |= (1 << i);
- *backend_disable_mask_per_asic &= ~(1 << i);
- ++enabled_backends_count;
- }
- }
- }
-
-
- memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * CAYMAN_MAX_PIPES);
- switch (rdev->family) {
- case CHIP_CAYMAN:
- case CHIP_ARUBA:
- force_no_swizzle = true;
- break;
- default:
- force_no_swizzle = false;
- break;
- }
- if (force_no_swizzle) {
- bool last_backend_enabled = false;
-
- force_no_swizzle = false;
- for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
- if (((enabled_backends_mask >> i) & 1) == 1) {
- if (last_backend_enabled)
- force_no_swizzle = true;
- last_backend_enabled = true;
- } else
- last_backend_enabled = false;
- }
- }
-
- switch (num_tile_pipes) {
- case 1:
- case 3:
- case 5:
- case 7:
- DRM_ERROR("odd number of pipes!\n");
- break;
- case 2:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- break;
- case 4:
- if (force_no_swizzle) {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- } else {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 1;
- swizzle_pipe[3] = 3;
- }
- break;
- case 6:
- if (force_no_swizzle) {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- swizzle_pipe[4] = 4;
- swizzle_pipe[5] = 5;
- } else {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 1;
- swizzle_pipe[4] = 3;
- swizzle_pipe[5] = 5;
- }
- break;
- case 8:
- if (force_no_swizzle) {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- swizzle_pipe[4] = 4;
- swizzle_pipe[5] = 5;
- swizzle_pipe[6] = 6;
- swizzle_pipe[7] = 7;
- } else {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 6;
- swizzle_pipe[4] = 1;
- swizzle_pipe[5] = 3;
- swizzle_pipe[6] = 5;
- swizzle_pipe[7] = 7;
- }
- break;
- }
-
- for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
- while (((1 << cur_backend) & enabled_backends_mask) == 0)
- cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS;
-
- backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
-
- cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS;
- }
-
- return backend_map;
-}
-
-static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev,
- u32 disable_mask_per_se,
- u32 max_disable_mask_per_se,
- u32 num_shader_engines)
-{
- u32 disable_field_width_per_se = r600_count_pipe_bits(disable_mask_per_se);
- u32 disable_mask_per_asic = disable_mask_per_se & max_disable_mask_per_se;
-
- if (num_shader_engines == 1)
- return disable_mask_per_asic;
- else if (num_shader_engines == 2)
- return disable_mask_per_asic | (disable_mask_per_asic << disable_field_width_per_se);
- else
- return 0xffffffff;
-}
-
static void cayman_gpu_init(struct radeon_device *rdev)
{
- u32 cc_rb_backend_disable = 0;
- u32 cc_gc_shader_pipe_config;
u32 gb_addr_config = 0;
u32 mc_shared_chmap, mc_arb_ramcfg;
- u32 gb_backend_map;
u32 cgts_tcc_disable;
u32 sx_debug_1;
u32 smx_dc_ctl0;
- u32 gc_user_shader_pipe_config;
- u32 gc_user_rb_backend_disable;
- u32 cgts_user_tcc_disable;
u32 cgts_sm_ctrl_reg;
u32 hdp_host_path_cntl;
u32 tmp;
+ u32 disabled_rb_mask;
int i, j;
switch (rdev->family) {
@@ -650,6 +452,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
rdev->config.cayman.sc_prim_fifo_size = 0x100;
rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = CAYMAN_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_ARUBA:
default:
@@ -687,6 +490,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
rdev->config.cayman.sc_prim_fifo_size = 0x40;
rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = ARUBA_GB_ADDR_CONFIG_GOLDEN;
break;
}
@@ -706,39 +510,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
- cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE);
- cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
- cgts_tcc_disable = 0xffff0000;
- for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++)
- cgts_tcc_disable &= ~(1 << (16 + i));
- gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE);
- gc_user_shader_pipe_config = RREG32(GC_USER_SHADER_PIPE_CONFIG);
- cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE);
-
- rdev->config.cayman.num_shader_engines = rdev->config.cayman.max_shader_engines;
- tmp = ((~gc_user_shader_pipe_config) & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT;
- rdev->config.cayman.num_shader_pipes_per_simd = r600_count_pipe_bits(tmp);
- rdev->config.cayman.num_tile_pipes = rdev->config.cayman.max_tile_pipes;
- tmp = ((~gc_user_shader_pipe_config) & INACTIVE_SIMDS_MASK) >> INACTIVE_SIMDS_SHIFT;
- rdev->config.cayman.num_simds_per_se = r600_count_pipe_bits(tmp);
- tmp = ((~gc_user_rb_backend_disable) & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
- rdev->config.cayman.num_backends_per_se = r600_count_pipe_bits(tmp);
- tmp = (gc_user_rb_backend_disable & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
- rdev->config.cayman.backend_disable_mask_per_asic =
- cayman_get_disable_mask_per_asic(rdev, tmp, CAYMAN_MAX_BACKENDS_PER_SE_MASK,
- rdev->config.cayman.num_shader_engines);
- rdev->config.cayman.backend_map =
- cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes,
- rdev->config.cayman.num_backends_per_se *
- rdev->config.cayman.num_shader_engines,
- &rdev->config.cayman.backend_disable_mask_per_asic,
- rdev->config.cayman.num_shader_engines);
- tmp = ((~cgts_user_tcc_disable) & TCC_DISABLE_MASK) >> TCC_DISABLE_SHIFT;
- rdev->config.cayman.num_texture_channel_caches = r600_count_pipe_bits(tmp);
- tmp = (mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT;
- rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
- if (rdev->config.cayman.mem_max_burst_length_bytes > 512)
- rdev->config.cayman.mem_max_burst_length_bytes = 512;
tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
if (rdev->config.cayman.mem_row_size_in_kb > 4)
@@ -748,73 +519,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
rdev->config.cayman.num_gpus = 1;
rdev->config.cayman.multi_gpu_tile_size = 64;
- //gb_addr_config = 0x02011003
-#if 0
- gb_addr_config = RREG32(GB_ADDR_CONFIG);
-#else
- gb_addr_config = 0;
- switch (rdev->config.cayman.num_tile_pipes) {
- case 1:
- default:
- gb_addr_config |= NUM_PIPES(0);
- break;
- case 2:
- gb_addr_config |= NUM_PIPES(1);
- break;
- case 4:
- gb_addr_config |= NUM_PIPES(2);
- break;
- case 8:
- gb_addr_config |= NUM_PIPES(3);
- break;
- }
-
- tmp = (rdev->config.cayman.mem_max_burst_length_bytes / 256) - 1;
- gb_addr_config |= PIPE_INTERLEAVE_SIZE(tmp);
- gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.cayman.num_shader_engines - 1);
- tmp = (rdev->config.cayman.shader_engine_tile_size / 16) - 1;
- gb_addr_config |= SHADER_ENGINE_TILE_SIZE(tmp);
- switch (rdev->config.cayman.num_gpus) {
- case 1:
- default:
- gb_addr_config |= NUM_GPUS(0);
- break;
- case 2:
- gb_addr_config |= NUM_GPUS(1);
- break;
- case 4:
- gb_addr_config |= NUM_GPUS(2);
- break;
- }
- switch (rdev->config.cayman.multi_gpu_tile_size) {
- case 16:
- gb_addr_config |= MULTI_GPU_TILE_SIZE(0);
- break;
- case 32:
- default:
- gb_addr_config |= MULTI_GPU_TILE_SIZE(1);
- break;
- case 64:
- gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
- break;
- case 128:
- gb_addr_config |= MULTI_GPU_TILE_SIZE(3);
- break;
- }
- switch (rdev->config.cayman.mem_row_size_in_kb) {
- case 1:
- default:
- gb_addr_config |= ROW_SIZE(0);
- break;
- case 2:
- gb_addr_config |= ROW_SIZE(1);
- break;
- case 4:
- gb_addr_config |= ROW_SIZE(2);
- break;
- }
-#endif
-
tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
rdev->config.cayman.num_tile_pipes = (1 << tmp);
tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
@@ -828,17 +532,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
rdev->config.cayman.mem_row_size_in_kb = 1 << tmp;
- //gb_backend_map = 0x76541032;
-#if 0
- gb_backend_map = RREG32(GB_BACKEND_MAP);
-#else
- gb_backend_map =
- cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes,
- rdev->config.cayman.num_backends_per_se *
- rdev->config.cayman.num_shader_engines,
- &rdev->config.cayman.backend_disable_mask_per_asic,
- rdev->config.cayman.num_shader_engines);
-#endif
+
/* setup tiling info dword. gb_addr_config is not adequate since it does
* not have bank info, so create a custom tiling dword.
* bits 3:0 num_pipes
@@ -866,33 +560,49 @@ static void cayman_gpu_init(struct radeon_device *rdev)
/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
if (rdev->flags & RADEON_IS_IGP)
rdev->config.cayman.tile_config |= 1 << 4;
- else
- rdev->config.cayman.tile_config |=
- ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
+ else {
+ if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
+ rdev->config.cayman.tile_config |= 1 << 4;
+ else
+ rdev->config.cayman.tile_config |= 0 << 4;
+ }
rdev->config.cayman.tile_config |=
((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
rdev->config.cayman.tile_config |=
((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
- rdev->config.cayman.backend_map = gb_backend_map;
- WREG32(GB_BACKEND_MAP, gb_backend_map);
+ tmp = 0;
+ for (i = (rdev->config.cayman.max_shader_engines - 1); i >= 0; i--) {
+ u32 rb_disable_bitmap;
+
+ WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+ WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+ rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
+ tmp <<= 4;
+ tmp |= rb_disable_bitmap;
+ }
+ /* enabled rb are just the one not disabled :) */
+ disabled_rb_mask = tmp;
+
+ WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
+ WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
+
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
- /* primary versions */
- WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
- WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
- WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
+ tmp = gb_addr_config & NUM_PIPES_MASK;
+ tmp = r6xx_remap_render_backend(rdev, tmp,
+ rdev->config.cayman.max_backends_per_se *
+ rdev->config.cayman.max_shader_engines,
+ CAYMAN_MAX_BACKENDS, disabled_rb_mask);
+ WREG32(GB_BACKEND_MAP, tmp);
+ cgts_tcc_disable = 0xffff0000;
+ for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++)
+ cgts_tcc_disable &= ~(1 << (16 + i));
WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable);
-
- /* user versions */
- WREG32(GC_USER_RB_BACKEND_DISABLE, cc_rb_backend_disable);
- WREG32(GC_USER_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
- WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
-
WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable);
WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 2aa7046ada56..a0b98066e207 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -41,6 +41,9 @@
#define CAYMAN_MAX_TCC 16
#define CAYMAN_MAX_TCC_MASK 0xFF
+#define CAYMAN_GB_ADDR_CONFIG_GOLDEN 0x02011003
+#define ARUBA_GB_ADDR_CONFIG_GOLDEN 0x12010001
+
#define DMIF_ADDR_CONFIG 0xBD4
#define SRBM_GFX_CNTL 0x0E44
#define RINGID(x) (((x) & 0x3) << 0)
@@ -148,6 +151,8 @@
#define CGTS_SYS_TCC_DISABLE 0x3F90
#define CGTS_USER_SYS_TCC_DISABLE 0x3F94
+#define RLC_GFX_INDEX 0x3FC4
+
#define CONFIG_MEMSIZE 0x5428
#define HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
@@ -212,6 +217,12 @@
#define SOFT_RESET_VGT (1 << 14)
#define SOFT_RESET_IA (1 << 15)
+#define GRBM_GFX_INDEX 0x802C
+#define INSTANCE_INDEX(x) ((x) << 0)
+#define SE_INDEX(x) ((x) << 16)
+#define INSTANCE_BROADCAST_WRITES (1 << 30)
+#define SE_BROADCAST_WRITES (1 << 31)
+
#define SCRATCH_REG0 0x8500
#define SCRATCH_REG1 0x8504
#define SCRATCH_REG2 0x8508
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index f388a1d73b63..45cfcea63507 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1376,113 +1376,51 @@ int r600_asic_reset(struct radeon_device *rdev)
return r600_gpu_soft_reset(rdev);
}
-static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
- u32 num_backends,
- u32 backend_disable_mask)
-{
- u32 backend_map = 0;
- u32 enabled_backends_mask;
- u32 enabled_backends_count;
- u32 cur_pipe;
- u32 swizzle_pipe[R6XX_MAX_PIPES];
- u32 cur_backend;
- u32 i;
-
- if (num_tile_pipes > R6XX_MAX_PIPES)
- num_tile_pipes = R6XX_MAX_PIPES;
- if (num_tile_pipes < 1)
- num_tile_pipes = 1;
- if (num_backends > R6XX_MAX_BACKENDS)
- num_backends = R6XX_MAX_BACKENDS;
- if (num_backends < 1)
- num_backends = 1;
-
- enabled_backends_mask = 0;
- enabled_backends_count = 0;
- for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
- if (((backend_disable_mask >> i) & 1) == 0) {
- enabled_backends_mask |= (1 << i);
- ++enabled_backends_count;
- }
- if (enabled_backends_count == num_backends)
- break;
- }
-
- if (enabled_backends_count == 0) {
- enabled_backends_mask = 1;
- enabled_backends_count = 1;
- }
-
- if (enabled_backends_count != num_backends)
- num_backends = enabled_backends_count;
-
- memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
- switch (num_tile_pipes) {
- case 1:
- swizzle_pipe[0] = 0;
- break;
- case 2:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- break;
- case 3:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- break;
- case 4:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- break;
- case 5:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- swizzle_pipe[4] = 4;
- break;
- case 6:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 5;
- swizzle_pipe[4] = 1;
- swizzle_pipe[5] = 3;
- break;
- case 7:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 6;
- swizzle_pipe[4] = 1;
- swizzle_pipe[5] = 3;
- swizzle_pipe[6] = 5;
- break;
- case 8:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 6;
- swizzle_pipe[4] = 1;
- swizzle_pipe[5] = 3;
- swizzle_pipe[6] = 5;
- swizzle_pipe[7] = 7;
- break;
+u32 r6xx_remap_render_backend(struct radeon_device *rdev,
+ u32 tiling_pipe_num,
+ u32 max_rb_num,
+ u32 total_max_rb_num,
+ u32 disabled_rb_mask)
+{
+ u32 rendering_pipe_num, rb_num_width, req_rb_num;
+ u32 pipe_rb_ratio, pipe_rb_remain;
+ u32 data = 0, mask = 1 << (max_rb_num - 1);
+ unsigned i, j;
+
+ /* mask out the RBs that don't exist on that asic */
+ disabled_rb_mask |= (0xff << max_rb_num) & 0xff;
+
+ rendering_pipe_num = 1 << tiling_pipe_num;
+ req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
+ BUG_ON(rendering_pipe_num < req_rb_num);
+
+ pipe_rb_ratio = rendering_pipe_num / req_rb_num;
+ pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num;
+
+ if (rdev->family <= CHIP_RV740) {
+ /* r6xx/r7xx */
+ rb_num_width = 2;
+ } else {
+ /* eg+ */
+ rb_num_width = 4;
}
- cur_backend = 0;
- for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
- while (((1 << cur_backend) & enabled_backends_mask) == 0)
- cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
-
- backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));
-
- cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
+ for (i = 0; i < max_rb_num; i++) {
+ if (!(mask & disabled_rb_mask)) {
+ for (j = 0; j < pipe_rb_ratio; j++) {
+ data <<= rb_num_width;
+ data |= max_rb_num - i - 1;
+ }
+ if (pipe_rb_remain) {
+ data <<= rb_num_width;
+ data |= max_rb_num - i - 1;
+ pipe_rb_remain--;
+ }
+ }
+ mask >>= 1;
}
- return backend_map;
+ return data;
}
int r600_count_pipe_bits(uint32_t val)
@@ -1500,7 +1438,6 @@ void r600_gpu_init(struct radeon_device *rdev)
{
u32 tiling_config;
u32 ramcfg;
- u32 backend_map;
u32 cc_rb_backend_disable;
u32 cc_gc_shader_pipe_config;
u32 tmp;
@@ -1511,8 +1448,9 @@ void r600_gpu_init(struct radeon_device *rdev)
u32 sq_thread_resource_mgmt = 0;
u32 sq_stack_resource_mgmt_1 = 0;
u32 sq_stack_resource_mgmt_2 = 0;
+ u32 disabled_rb_mask;
- /* FIXME: implement */
+ rdev->config.r600.tiling_group_size = 256;
switch (rdev->family) {
case CHIP_R600:
rdev->config.r600.max_pipes = 4;
@@ -1616,10 +1554,7 @@ void r600_gpu_init(struct radeon_device *rdev)
rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
- if ((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
- rdev->config.r600.tiling_group_size = 512;
- else
- rdev->config.r600.tiling_group_size = 256;
+
tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
if (tmp > 3) {
tiling_config |= ROW_TILING(3);
@@ -1631,32 +1566,36 @@ void r600_gpu_init(struct radeon_device *rdev)
tiling_config |= BANK_SWAPS(1);
cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
- cc_rb_backend_disable |=
- BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
-
- cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
- cc_gc_shader_pipe_config |=
- INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
- cc_gc_shader_pipe_config |=
- INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
-
- backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
- (R6XX_MAX_BACKENDS -
- r600_count_pipe_bits((cc_rb_backend_disable &
- R6XX_MAX_BACKENDS_MASK) >> 16)),
- (cc_rb_backend_disable >> 16));
+ tmp = R6XX_MAX_BACKENDS -
+ r600_count_pipe_bits((cc_rb_backend_disable >> 16) & R6XX_MAX_BACKENDS_MASK);
+ if (tmp < rdev->config.r600.max_backends) {
+ rdev->config.r600.max_backends = tmp;
+ }
+
+ cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
+ tmp = R6XX_MAX_PIPES -
+ r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R6XX_MAX_PIPES_MASK);
+ if (tmp < rdev->config.r600.max_pipes) {
+ rdev->config.r600.max_pipes = tmp;
+ }
+ tmp = R6XX_MAX_SIMDS -
+ r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
+ if (tmp < rdev->config.r600.max_simds) {
+ rdev->config.r600.max_simds = tmp;
+ }
+
+ disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
+ tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
+ tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
+ R6XX_MAX_BACKENDS, disabled_rb_mask);
+ tiling_config |= tmp << 16;
+ rdev->config.r600.backend_map = tmp;
+
rdev->config.r600.tile_config = tiling_config;
- rdev->config.r600.backend_map = backend_map;
- tiling_config |= BACKEND_MAP(backend_map);
WREG32(GB_TILING_CONFIG, tiling_config);
WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
- /* Setup pipes */
- WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
- WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
- WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
-
tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 15bd3b216243..a0dbf1fe6a40 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -219,6 +219,8 @@
#define BACKEND_MAP(x) ((x) << 16)
#define GB_TILING_CONFIG 0x98F0
+#define PIPE_TILING__SHIFT 1
+#define PIPE_TILING__MASK 0x0000000e
#define GC_USER_SHADER_PIPE_CONFIG 0x8954
#define INACTIVE_QD_PIPES(x) ((x) << 8)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 492654f8ee74..85dac33e3cce 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -346,6 +346,9 @@ struct radeon_bo {
/* Constant after initialization */
struct radeon_device *rdev;
struct drm_gem_object gem_base;
+
+ struct ttm_bo_kmap_obj dma_buf_vmap;
+ int vmapping_count;
};
#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
@@ -1845,6 +1848,11 @@ extern struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock);
extern void r600_hdmi_enable(struct drm_encoder *encoder);
extern void r600_hdmi_disable(struct drm_encoder *encoder);
extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
+extern u32 r6xx_remap_render_backend(struct radeon_device *rdev,
+ u32 tiling_pipe_num,
+ u32 max_rb_num,
+ u32 total_max_rb_num,
+ u32 enabled_rb_mask);
/*
* evergreen functions used by radeon_encoder.c
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 0137689ed461..142f89462aa4 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -147,6 +147,7 @@ static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
sync_to_ring, p->ring);
}
+/* XXX: note that this is called from the legacy UMS CS ioctl as well */
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
struct drm_radeon_cs *cs = data;
@@ -245,22 +246,24 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
}
}
- if ((p->cs_flags & RADEON_CS_USE_VM) &&
- !p->rdev->vm_manager.enabled) {
- DRM_ERROR("VM not active on asic!\n");
- return -EINVAL;
- }
-
- /* we only support VM on SI+ */
- if ((p->rdev->family >= CHIP_TAHITI) &&
- ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
- DRM_ERROR("VM required on SI+!\n");
- return -EINVAL;
- }
+ /* these are KMS only */
+ if (p->rdev) {
+ if ((p->cs_flags & RADEON_CS_USE_VM) &&
+ !p->rdev->vm_manager.enabled) {
+ DRM_ERROR("VM not active on asic!\n");
+ return -EINVAL;
+ }
- if (radeon_cs_get_ring(p, ring, priority))
- return -EINVAL;
+ /* we only support VM on SI+ */
+ if ((p->rdev->family >= CHIP_TAHITI) &&
+ ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
+ DRM_ERROR("VM required on SI+!\n");
+ return -EINVAL;
+ }
+ if (radeon_cs_get_ring(p, ring, priority))
+ return -EINVAL;
+ }
/* deal with non-vm */
if ((p->chunk_ib_idx != -1) &&
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index b8f835d8ecb4..8ddab4c76710 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -85,6 +85,47 @@ static void radeon_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, v
}
+static int radeon_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+ return -EINVAL;
+}
+
+static void *radeon_gem_prime_vmap(struct dma_buf *dma_buf)
+{
+ struct radeon_bo *bo = dma_buf->priv;
+ struct drm_device *dev = bo->rdev->ddev;
+ int ret;
+
+ mutex_lock(&dev->struct_mutex);
+ if (bo->vmapping_count) {
+ bo->vmapping_count++;
+ goto out_unlock;
+ }
+
+ ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
+ &bo->dma_buf_vmap);
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ return ERR_PTR(ret);
+ }
+ bo->vmapping_count = 1;
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return bo->dma_buf_vmap.virtual;
+}
+
+static void radeon_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+ struct radeon_bo *bo = dma_buf->priv;
+ struct drm_device *dev = bo->rdev->ddev;
+
+ mutex_lock(&dev->struct_mutex);
+ bo->vmapping_count--;
+ if (bo->vmapping_count == 0) {
+ ttm_bo_kunmap(&bo->dma_buf_vmap);
+ }
+ mutex_unlock(&dev->struct_mutex);
+}
const static struct dma_buf_ops radeon_dmabuf_ops = {
.map_dma_buf = radeon_gem_map_dma_buf,
.unmap_dma_buf = radeon_gem_unmap_dma_buf,
@@ -93,6 +134,9 @@ const static struct dma_buf_ops radeon_dmabuf_ops = {
.kmap_atomic = radeon_gem_kmap_atomic,
.kunmap = radeon_gem_kunmap,
.kunmap_atomic = radeon_gem_kunmap_atomic,
+ .mmap = radeon_gem_prime_mmap,
+ .vmap = radeon_gem_prime_vmap,
+ .vunmap = radeon_gem_prime_vunmap,
};
static int radeon_prime_create(struct drm_device *dev,
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index c2f473bc13b8..04ddc365a908 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -151,6 +151,8 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+ if (rdev->family == CHIP_RV740)
+ WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
@@ -363,180 +365,6 @@ void r700_cp_fini(struct radeon_device *rdev)
/*
* Core functions
*/
-static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
- u32 num_tile_pipes,
- u32 num_backends,
- u32 backend_disable_mask)
-{
- u32 backend_map = 0;
- u32 enabled_backends_mask;
- u32 enabled_backends_count;
- u32 cur_pipe;
- u32 swizzle_pipe[R7XX_MAX_PIPES];
- u32 cur_backend;
- u32 i;
- bool force_no_swizzle;
-
- if (num_tile_pipes > R7XX_MAX_PIPES)
- num_tile_pipes = R7XX_MAX_PIPES;
- if (num_tile_pipes < 1)
- num_tile_pipes = 1;
- if (num_backends > R7XX_MAX_BACKENDS)
- num_backends = R7XX_MAX_BACKENDS;
- if (num_backends < 1)
- num_backends = 1;
-
- enabled_backends_mask = 0;
- enabled_backends_count = 0;
- for (i = 0; i < R7XX_MAX_BACKENDS; ++i) {
- if (((backend_disable_mask >> i) & 1) == 0) {
- enabled_backends_mask |= (1 << i);
- ++enabled_backends_count;
- }
- if (enabled_backends_count == num_backends)
- break;
- }
-
- if (enabled_backends_count == 0) {
- enabled_backends_mask = 1;
- enabled_backends_count = 1;
- }
-
- if (enabled_backends_count != num_backends)
- num_backends = enabled_backends_count;
-
- switch (rdev->family) {
- case CHIP_RV770:
- case CHIP_RV730:
- force_no_swizzle = false;
- break;
- case CHIP_RV710:
- case CHIP_RV740:
- default:
- force_no_swizzle = true;
- break;
- }
-
- memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES);
- switch (num_tile_pipes) {
- case 1:
- swizzle_pipe[0] = 0;
- break;
- case 2:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- break;
- case 3:
- if (force_no_swizzle) {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- } else {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 1;
- }
- break;
- case 4:
- if (force_no_swizzle) {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- } else {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 3;
- swizzle_pipe[3] = 1;
- }
- break;
- case 5:
- if (force_no_swizzle) {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- swizzle_pipe[4] = 4;
- } else {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 1;
- swizzle_pipe[4] = 3;
- }
- break;
- case 6:
- if (force_no_swizzle) {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- swizzle_pipe[4] = 4;
- swizzle_pipe[5] = 5;
- } else {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 5;
- swizzle_pipe[4] = 3;
- swizzle_pipe[5] = 1;
- }
- break;
- case 7:
- if (force_no_swizzle) {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- swizzle_pipe[4] = 4;
- swizzle_pipe[5] = 5;
- swizzle_pipe[6] = 6;
- } else {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 6;
- swizzle_pipe[4] = 3;
- swizzle_pipe[5] = 1;
- swizzle_pipe[6] = 5;
- }
- break;
- case 8:
- if (force_no_swizzle) {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- swizzle_pipe[4] = 4;
- swizzle_pipe[5] = 5;
- swizzle_pipe[6] = 6;
- swizzle_pipe[7] = 7;
- } else {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 6;
- swizzle_pipe[4] = 3;
- swizzle_pipe[5] = 1;
- swizzle_pipe[6] = 7;
- swizzle_pipe[7] = 5;
- }
- break;
- }
-
- cur_backend = 0;
- for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
- while (((1 << cur_backend) & enabled_backends_mask) == 0)
- cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;
-
- backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));
-
- cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;
- }
-
- return backend_map;
-}
-
static void rv770_gpu_init(struct radeon_device *rdev)
{
int i, j, num_qd_pipes;
@@ -552,14 +380,17 @@ static void rv770_gpu_init(struct radeon_device *rdev)
u32 sq_thread_resource_mgmt;
u32 hdp_host_path_cntl;
u32 sq_dyn_gpr_size_simd_ab_0;
- u32 backend_map;
u32 gb_tiling_config = 0;
u32 cc_rb_backend_disable = 0;
u32 cc_gc_shader_pipe_config = 0;
u32 mc_arb_ramcfg;
- u32 db_debug4;
+ u32 db_debug4, tmp;
+ u32 inactive_pipes, shader_pipe_config;
+ u32 disabled_rb_mask;
+ unsigned active_number;
/* setup chip specs */
+ rdev->config.rv770.tiling_group_size = 256;
switch (rdev->family) {
case CHIP_RV770:
rdev->config.rv770.max_pipes = 4;
@@ -670,33 +501,70 @@ static void rv770_gpu_init(struct radeon_device *rdev)
/* setup tiling, simd, pipe config */
mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
+ shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
+ inactive_pipes = (shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT;
+ for (i = 0, tmp = 1, active_number = 0; i < R7XX_MAX_PIPES; i++) {
+ if (!(inactive_pipes & tmp)) {
+ active_number++;
+ }
+ tmp <<= 1;
+ }
+ if (active_number == 1) {
+ WREG32(SPI_CONFIG_CNTL, DISABLE_INTERP_1);
+ } else {
+ WREG32(SPI_CONFIG_CNTL, 0);
+ }
+
+ cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
+ tmp = R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_rb_backend_disable >> 16);
+ if (tmp < rdev->config.rv770.max_backends) {
+ rdev->config.rv770.max_backends = tmp;
+ }
+
+ cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
+ tmp = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R7XX_MAX_PIPES_MASK);
+ if (tmp < rdev->config.rv770.max_pipes) {
+ rdev->config.rv770.max_pipes = tmp;
+ }
+ tmp = R7XX_MAX_SIMDS - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R7XX_MAX_SIMDS_MASK);
+ if (tmp < rdev->config.rv770.max_simds) {
+ rdev->config.rv770.max_simds = tmp;
+ }
+
switch (rdev->config.rv770.max_tile_pipes) {
case 1:
default:
- gb_tiling_config |= PIPE_TILING(0);
+ gb_tiling_config = PIPE_TILING(0);
break;
case 2:
- gb_tiling_config |= PIPE_TILING(1);
+ gb_tiling_config = PIPE_TILING(1);
break;
case 4:
- gb_tiling_config |= PIPE_TILING(2);
+ gb_tiling_config = PIPE_TILING(2);
break;
case 8:
- gb_tiling_config |= PIPE_TILING(3);
+ gb_tiling_config = PIPE_TILING(3);
break;
}
rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes;
+ disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R7XX_MAX_BACKENDS_MASK;
+ tmp = (gb_tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
+ tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.rv770.max_backends,
+ R7XX_MAX_BACKENDS, disabled_rb_mask);
+ gb_tiling_config |= tmp << 16;
+ rdev->config.rv770.backend_map = tmp;
+
if (rdev->family == CHIP_RV770)
gb_tiling_config |= BANK_TILING(1);
- else
- gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
+ else {
+ if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
+ gb_tiling_config |= BANK_TILING(1);
+ else
+ gb_tiling_config |= BANK_TILING(0);
+ }
rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3);
gb_tiling_config |= GROUP_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
- if ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
- rdev->config.rv770.tiling_group_size = 512;
- else
- rdev->config.rv770.tiling_group_size = 256;
if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) {
gb_tiling_config |= ROW_TILING(3);
gb_tiling_config |= SAMPLE_SPLIT(3);
@@ -708,47 +576,19 @@ static void rv770_gpu_init(struct radeon_device *rdev)
}
gb_tiling_config |= BANK_SWAPS(1);
-
- cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
- cc_rb_backend_disable |=
- BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << rdev->config.rv770.max_backends) & R7XX_MAX_BACKENDS_MASK);
-
- cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
- cc_gc_shader_pipe_config |=
- INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << rdev->config.rv770.max_pipes) & R7XX_MAX_PIPES_MASK);
- cc_gc_shader_pipe_config |=
- INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << rdev->config.rv770.max_simds) & R7XX_MAX_SIMDS_MASK);
-
- if (rdev->family == CHIP_RV740)
- backend_map = 0x28;
- else
- backend_map = r700_get_tile_pipe_to_backend_map(rdev,
- rdev->config.rv770.max_tile_pipes,
- (R7XX_MAX_BACKENDS -
- r600_count_pipe_bits((cc_rb_backend_disable &
- R7XX_MAX_BACKENDS_MASK) >> 16)),
- (cc_rb_backend_disable >> 16));
-
rdev->config.rv770.tile_config = gb_tiling_config;
- rdev->config.rv770.backend_map = backend_map;
- gb_tiling_config |= BACKEND_MAP(backend_map);
WREG32(GB_TILING_CONFIG, gb_tiling_config);
WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
- WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
- WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
- WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
- WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
-
WREG32(CGTS_SYS_TCC_DISABLE, 0);
WREG32(CGTS_TCC_DISABLE, 0);
WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
WREG32(CGTS_USER_TCC_DISABLE, 0);
- num_qd_pipes =
- R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
+
+ num_qd_pipes = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK);
WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK);
@@ -809,8 +649,6 @@ static void rv770_gpu_init(struct radeon_device *rdev)
WREG32(VGT_NUM_INSTANCES, 1);
- WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
-
WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
WREG32(CP_PERFMON_CNTL, 0);
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index 9c549f702f2f..fdc089896011 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -106,10 +106,13 @@
#define BACKEND_MAP(x) ((x) << 16)
#define GB_TILING_CONFIG 0x98F0
+#define PIPE_TILING__SHIFT 1
+#define PIPE_TILING__MASK 0x0000000e
#define GC_USER_SHADER_PIPE_CONFIG 0x8954
#define INACTIVE_QD_PIPES(x) ((x) << 8)
#define INACTIVE_QD_PIPES_MASK 0x0000FF00
+#define INACTIVE_QD_PIPES_SHIFT 8
#define INACTIVE_SIMDS(x) ((x) << 16)
#define INACTIVE_SIMDS_MASK 0x00FF0000
@@ -174,6 +177,7 @@
#define MC_VM_MD_L1_TLB0_CNTL 0x2654
#define MC_VM_MD_L1_TLB1_CNTL 0x2658
#define MC_VM_MD_L1_TLB2_CNTL 0x265C
+#define MC_VM_MD_L1_TLB3_CNTL 0x2698
#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C
#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038
#define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 36792bd4da77..b67cfcaa661f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1834,6 +1834,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
spin_unlock(&glob->lru_lock);
(void) ttm_bo_cleanup_refs(bo, false, false, false);
kref_put(&bo->list_kref, ttm_bo_release_list);
+ spin_lock(&glob->lru_lock);
continue;
}
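
The added spin_lock() restores the invariant that ttm_bo_swapout()'s list walk holds glob->lru_lock at the top of every iteration; this path drops the lock for the cleanup work and previously fell through to continue without re-taking it. A generic sketch of the pattern being repaired (the predicate and worker are hypothetical, and real code must also hold a reference on the item across the unlocked region, as TTM does with list_kref):

#include <linux/list.h>
#include <linux/spinlock.h>

extern bool needs_slow_path(struct list_head *pos);	/* hypothetical */
extern void slow_work(struct list_head *pos);		/* may sleep */

static void walk_locked_list(spinlock_t *lock, struct list_head *head)
{
	struct list_head *pos;

	spin_lock(lock);
	list_for_each(pos, head) {
		if (needs_slow_path(pos)) {
			spin_unlock(lock);
			slow_work(pos);
			spin_lock(lock);	/* re-take before 'continue' */
			continue;
		}
		/* fast path stays under the lock */
	}
	spin_unlock(lock);
}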
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index a029ee39b0c5..ce9a61179925 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -156,8 +156,17 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
if (!fb->active_16)
return 0;
- if (!fb->obj->vmapping)
- udl_gem_vmap(fb->obj);
+ if (!fb->obj->vmapping) {
+ ret = udl_gem_vmap(fb->obj);
+ if (ret == -ENOMEM) {
+ DRM_ERROR("failed to vmap fb\n");
+ return 0;
+ }
+ if (!fb->obj->vmapping) {
+ DRM_ERROR("failed to vmapping\n");
+ return 0;
+ }
+ }
start_cycles = get_cycles();
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 97acc9c6c95b..7bd65bdd15a8 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -180,6 +180,18 @@ int udl_gem_vmap(struct udl_gem_object *obj)
int page_count = obj->base.size / PAGE_SIZE;
int ret;
+ if (obj->base.import_attach) {
+ ret = dma_buf_begin_cpu_access(obj->base.import_attach->dmabuf,
+ 0, obj->base.size, DMA_BIDIRECTIONAL);
+ if (ret)
+ return -EINVAL;
+
+ obj->vmapping = dma_buf_vmap(obj->base.import_attach->dmabuf);
+ if (!obj->vmapping)
+ return -ENOMEM;
+ return 0;
+ }
+
ret = udl_gem_get_pages(obj, GFP_KERNEL);
if (ret)
return ret;
@@ -192,6 +204,13 @@ int udl_gem_vmap(struct udl_gem_object *obj)
void udl_gem_vunmap(struct udl_gem_object *obj)
{
+ if (obj->base.import_attach) {
+ dma_buf_vunmap(obj->base.import_attach->dmabuf, obj->vmapping);
+ dma_buf_end_cpu_access(obj->base.import_attach->dmabuf, 0,
+ obj->base.size, DMA_BIDIRECTIONAL);
+ return;
+ }
+
if (obj->vmapping)
vunmap(obj->vmapping);
@@ -202,12 +221,12 @@ void udl_gem_free_object(struct drm_gem_object *gem_obj)
{
struct udl_gem_object *obj = to_udl_bo(gem_obj);
- if (gem_obj->import_attach)
- drm_prime_gem_destroy(gem_obj, obj->sg);
-
if (obj->vmapping)
udl_gem_vunmap(obj);
+ if (gem_obj->import_attach)
+ drm_prime_gem_destroy(gem_obj, obj->sg);
+
if (obj->pages)
udl_gem_put_pages(obj);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 51c9ba5cd2fb..21ee78226560 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -66,7 +66,7 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv,
cmd += sizeof(remap_cmd) / sizeof(uint32);
for (i = 0; i < num_pages; ++i) {
- if (VMW_PPN_SIZE > 4)
+ if (VMW_PPN_SIZE <= 4)
*cmd = page_to_pfn(*pages++);
else
*((uint64_t *)cmd) = page_to_pfn(*pages++);
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 444143e5f28c..d99db5623acf 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -1653,7 +1653,6 @@ mpt_mapresources(MPT_ADAPTER *ioc)
unsigned long port;
u32 msize;
u32 psize;
- u8 revision;
int r = -ENODEV;
struct pci_dev *pdev;
@@ -1670,8 +1669,6 @@ mpt_mapresources(MPT_ADAPTER *ioc)
return r;
}
- pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
-
if (sizeof(dma_addr_t) > 4) {
const uint64_t required_mask = dma_get_required_mask
(&pdev->dev);
@@ -1779,7 +1776,6 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
MPT_ADAPTER *ioc;
u8 cb_idx;
int r = -ENODEV;
- u8 revision;
u8 pcixcmd;
static int mpt_ids = 0;
#ifdef CONFIG_PROC_FS
@@ -1887,8 +1883,8 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "facts @ %p, pfacts[0] @ %p\n",
ioc->name, &ioc->facts, &ioc->pfacts[0]));
- pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
- mpt_get_product_name(pdev->vendor, pdev->device, revision, ioc->prod_name);
+ mpt_get_product_name(pdev->vendor, pdev->device, pdev->revision,
+ ioc->prod_name);
switch (pdev->device)
{
@@ -1903,7 +1899,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
break;
case MPI_MANUFACTPAGE_DEVICEID_FC929X:
- if (revision < XL_929) {
+ if (pdev->revision < XL_929) {
/* 929X Chip Fix. Set Split transactions level
* for PCIX. Set MOST bits to zero.
*/
@@ -1934,7 +1930,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
/* 1030 Chip Fix. Disable Split transactions
* for PCIX. Set MOST bits to zero if Rev < C0( = 8).
*/
- if (revision < C0_1030) {
+ if (pdev->revision < C0_1030) {
pci_read_config_byte(pdev, 0x6a, &pcixcmd);
pcixcmd &= 0x8F;
pci_write_config_byte(pdev, 0x6a, pcixcmd);
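
All of the fusion changes in this hunk are the same substitution: the PCI core caches the revision ID in struct pci_dev at enumeration time, so the manual config-space read of PCI_CLASS_REVISION is redundant. A minimal before/after sketch:

#include <linux/pci.h>

/* Old style: an extra config-space read in every caller. */
static u8 chip_rev_old(struct pci_dev *pdev)
{
	u8 revision;

	pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
	return revision;
}

/* New style: use the value the PCI core already read. */
static u8 chip_rev_new(struct pci_dev *pdev)
{
	return pdev->revision;
}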
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 6e6e16aab9da..b383b6961e59 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -1250,7 +1250,6 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
int iocnum;
unsigned int port;
int cim_rev;
- u8 revision;
struct scsi_device *sdev;
VirtDevice *vdevice;
@@ -1324,8 +1323,7 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
pdev = (struct pci_dev *) ioc->pcidev;
karg->pciId = pdev->device;
- pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
- karg->hwRev = revision;
+ karg->hwRev = pdev->revision;
karg->subSystemDevice = pdev->subsystem_device;
karg->subSystemVendor = pdev->subsystem_vendor;
diff --git a/drivers/rapidio/Kconfig b/drivers/rapidio/Kconfig
index bc8719238793..6194d35ebb97 100644
--- a/drivers/rapidio/Kconfig
+++ b/drivers/rapidio/Kconfig
@@ -22,6 +22,20 @@ config RAPIDIO_ENABLE_RX_TX_PORTS
ports for Input/Output direction to allow other traffic
than Maintenance transfers.
+config RAPIDIO_DMA_ENGINE
+ bool "DMA Engine support for RapidIO"
+ depends on RAPIDIO
+ select DMADEVICES
+ select DMA_ENGINE
+ help
+ Say Y here if you want to use the DMA Engine framework for RapidIO
+ data transfers to/from target RIO devices. RapidIO uses NREAD and
+ NWRITE (NWRITE_R, SWRITE) requests to transfer data between local
+ memory and memory on a remote target device. You need a DMA
+ controller capable of performing data transfers to/from RapidIO.
+
+ If you are unsure about this, say Y here.
+
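
To make the help text concrete, here is a rough sketch of how a client could use this option once the rest of this patch is in place: request a channel through the rio_request_dma() helper added to rio.c below, then drive it with standard dmaengine calls, passing the RapidIO-specific parameters (destination ID, 66-bit address, write type) through the context argument of device_prep_slave_sg(). The function and the fixed target offset are illustrative only, and real code must wait for completion before releasing the channel:

#include <linux/dmaengine.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/scatterlist.h>

static int example_rio_write(struct rio_dev *rdev,
			     struct scatterlist *sgl, unsigned int sg_len)
{
	struct rio_dma_ext rext = {
		.destid     = rdev->destid,
		.rio_addr   = 0x10000000,	/* illustrative target offset */
		.rio_addr_u = 0,
		.wr_type    = RDW_LAST_NWRITE_R, /* NWRITE, last packet NWRITE_R */
	};
	struct dma_async_tx_descriptor *txd;
	struct dma_chan *dchan;

	dchan = rio_request_dma(rdev);
	if (!dchan)
		return -ENODEV;

	/* The mport driver (tsi721_prep_rio_sg() below) receives &rext
	 * through the last parameter of device_prep_slave_sg().
	 */
	txd = dchan->device->device_prep_slave_sg(dchan, sgl, sg_len,
						  DMA_MEM_TO_DEV,
						  DMA_PREP_INTERRUPT, &rext);
	if (txd) {
		dmaengine_submit(txd);
		dma_async_issue_pending(dchan);
		/* ... wait for the completion callback here ... */
	}

	rio_release_dma(dchan);
	return txd ? 0 : -EIO;
}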
config RAPIDIO_DEBUG
bool "RapidIO subsystem debug messages"
depends on RAPIDIO
diff --git a/drivers/rapidio/devices/Makefile b/drivers/rapidio/devices/Makefile
index 3b7b4e2dff7c..7b62860f34f8 100644
--- a/drivers/rapidio/devices/Makefile
+++ b/drivers/rapidio/devices/Makefile
@@ -3,3 +3,6 @@
#
obj-$(CONFIG_RAPIDIO_TSI721) += tsi721.o
+ifeq ($(CONFIG_RAPIDIO_DMA_ENGINE),y)
+obj-$(CONFIG_RAPIDIO_TSI721) += tsi721_dma.o
+endif
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 30d2072f480b..722246cf20ab 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -108,6 +108,7 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
u16 destid, u8 hopcount, u32 offset, int len,
u32 *data, int do_wr)
{
+ void __iomem *regs = priv->regs + TSI721_DMAC_BASE(priv->mdma.ch_id);
struct tsi721_dma_desc *bd_ptr;
u32 rd_count, swr_ptr, ch_stat;
int i, err = 0;
@@ -116,10 +117,9 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
if (offset > (RIO_MAINT_SPACE_SZ - len) || (len != sizeof(u32)))
return -EINVAL;
- bd_ptr = priv->bdma[TSI721_DMACH_MAINT].bd_base;
+ bd_ptr = priv->mdma.bd_base;
- rd_count = ioread32(
- priv->regs + TSI721_DMAC_DRDCNT(TSI721_DMACH_MAINT));
+ rd_count = ioread32(regs + TSI721_DMAC_DRDCNT);
/* Initialize DMA descriptor */
bd_ptr[0].type_id = cpu_to_le32((DTYPE2 << 29) | (op << 19) | destid);
@@ -134,19 +134,18 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
mb();
/* Start DMA operation */
- iowrite32(rd_count + 2,
- priv->regs + TSI721_DMAC_DWRCNT(TSI721_DMACH_MAINT));
- ioread32(priv->regs + TSI721_DMAC_DWRCNT(TSI721_DMACH_MAINT));
+ iowrite32(rd_count + 2, regs + TSI721_DMAC_DWRCNT);
+ ioread32(regs + TSI721_DMAC_DWRCNT);
i = 0;
/* Wait until DMA transfer is finished */
- while ((ch_stat = ioread32(priv->regs +
- TSI721_DMAC_STS(TSI721_DMACH_MAINT))) & TSI721_DMAC_STS_RUN) {
+ while ((ch_stat = ioread32(regs + TSI721_DMAC_STS))
+ & TSI721_DMAC_STS_RUN) {
udelay(1);
if (++i >= 5000000) {
dev_dbg(&priv->pdev->dev,
"%s : DMA[%d] read timeout ch_status=%x\n",
- __func__, TSI721_DMACH_MAINT, ch_stat);
+ __func__, priv->mdma.ch_id, ch_stat);
if (!do_wr)
*data = 0xffffffff;
err = -EIO;
@@ -162,13 +161,10 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
__func__, ch_stat);
dev_dbg(&priv->pdev->dev, "OP=%d : destid=%x hc=%x off=%x\n",
do_wr ? MAINT_WR : MAINT_RD, destid, hopcount, offset);
- iowrite32(TSI721_DMAC_INT_ALL,
- priv->regs + TSI721_DMAC_INT(TSI721_DMACH_MAINT));
- iowrite32(TSI721_DMAC_CTL_INIT,
- priv->regs + TSI721_DMAC_CTL(TSI721_DMACH_MAINT));
+ iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT);
+ iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL);
udelay(10);
- iowrite32(0, priv->regs +
- TSI721_DMAC_DWRCNT(TSI721_DMACH_MAINT));
+ iowrite32(0, regs + TSI721_DMAC_DWRCNT);
udelay(1);
if (!do_wr)
*data = 0xffffffff;
@@ -184,8 +180,8 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
* NOTE: We skip checking and clearing FIFO entries here because we are
* waiting for the transfer to complete.
*/
- swr_ptr = ioread32(priv->regs + TSI721_DMAC_DSWP(TSI721_DMACH_MAINT));
- iowrite32(swr_ptr, priv->regs + TSI721_DMAC_DSRP(TSI721_DMACH_MAINT));
+ swr_ptr = ioread32(regs + TSI721_DMAC_DSWP);
+ iowrite32(swr_ptr, regs + TSI721_DMAC_DSRP);
err_out:
return err;
@@ -541,6 +537,22 @@ static irqreturn_t tsi721_irqhandler(int irq, void *ptr)
tsi721_pw_handler(mport);
}
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+ if (dev_int & TSI721_DEV_INT_BDMA_CH) {
+ int ch;
+
+ if (dev_ch_int & TSI721_INT_BDMA_CHAN_M) {
+ dev_dbg(&priv->pdev->dev,
+ "IRQ from DMA channel 0x%08x\n", dev_ch_int);
+
+ for (ch = 0; ch < TSI721_DMA_MAXCH; ch++) {
+ if (!(dev_ch_int & TSI721_INT_BDMA_CHAN(ch)))
+ continue;
+ tsi721_bdma_handler(&priv->bdma[ch]);
+ }
+ }
+ }
+#endif
return IRQ_HANDLED;
}
@@ -553,18 +565,26 @@ static void tsi721_interrupts_init(struct tsi721_device *priv)
priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
iowrite32(TSI721_SR_CHINT_IDBQRCV,
priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
- iowrite32(TSI721_INT_SR2PC_CHAN(IDB_QUEUE),
- priv->regs + TSI721_DEV_CHAN_INTE);
/* Enable SRIO MAC interrupts */
iowrite32(TSI721_RIO_EM_DEV_INT_EN_INT,
priv->regs + TSI721_RIO_EM_DEV_INT_EN);
+ /* Enable interrupts from channels in use */
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+ intr = TSI721_INT_SR2PC_CHAN(IDB_QUEUE) |
+ (TSI721_INT_BDMA_CHAN_M &
+ ~TSI721_INT_BDMA_CHAN(TSI721_DMACH_MAINT));
+#else
+ intr = TSI721_INT_SR2PC_CHAN(IDB_QUEUE);
+#endif
+ iowrite32(intr, priv->regs + TSI721_DEV_CHAN_INTE);
+
if (priv->flags & TSI721_USING_MSIX)
intr = TSI721_DEV_INT_SRIO;
else
intr = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO |
- TSI721_DEV_INT_SMSG_CH;
+ TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH;
iowrite32(intr, priv->regs + TSI721_DEV_INTE);
ioread32(priv->regs + TSI721_DEV_INTE);
@@ -715,12 +735,29 @@ static int tsi721_enable_msix(struct tsi721_device *priv)
TSI721_MSIX_OMSG_INT(i);
}
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+ /*
+ * Initialize MSI-X entries for Block DMA Engine:
+ * this driver supports TSI721_DMA_CHNUM DMA channels
+ * (one is reserved for SRIO maintenance transactions)
+ */
+ for (i = 0; i < TSI721_DMA_CHNUM; i++) {
+ entries[TSI721_VECT_DMA0_DONE + i].entry =
+ TSI721_MSIX_DMACH_DONE(i);
+ entries[TSI721_VECT_DMA0_INT + i].entry =
+ TSI721_MSIX_DMACH_INT(i);
+ }
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
+
err = pci_enable_msix(priv->pdev, entries, ARRAY_SIZE(entries));
if (err) {
if (err > 0)
dev_info(&priv->pdev->dev,
"Only %d MSI-X vectors available, "
"not using MSI-X\n", err);
+ else
+ dev_err(&priv->pdev->dev,
+ "Failed to enable MSI-X (err=%d)\n", err);
return err;
}
@@ -760,6 +797,22 @@ static int tsi721_enable_msix(struct tsi721_device *priv)
i, pci_name(priv->pdev));
}
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+ for (i = 0; i < TSI721_DMA_CHNUM; i++) {
+ priv->msix[TSI721_VECT_DMA0_DONE + i].vector =
+ entries[TSI721_VECT_DMA0_DONE + i].vector;
+ snprintf(priv->msix[TSI721_VECT_DMA0_DONE + i].irq_name,
+ IRQ_DEVICE_NAME_MAX, DRV_NAME "-dmad%d@pci:%s",
+ i, pci_name(priv->pdev));
+
+ priv->msix[TSI721_VECT_DMA0_INT + i].vector =
+ entries[TSI721_VECT_DMA0_INT + i].vector;
+ snprintf(priv->msix[TSI721_VECT_DMA0_INT + i].irq_name,
+ IRQ_DEVICE_NAME_MAX, DRV_NAME "-dmai%d@pci:%s",
+ i, pci_name(priv->pdev));
+ }
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
+
return 0;
}
#endif /* CONFIG_PCI_MSI */
@@ -888,20 +941,34 @@ static void tsi721_doorbell_free(struct tsi721_device *priv)
priv->idb_base = NULL;
}
-static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
+/**
+ * tsi721_bdma_maint_init - Initialize maintenance request BDMA channel.
+ * @priv: pointer to tsi721 private data
+ *
+ * Initialize the BDMA channel allocated for RapidIO maintenance read/write
+ * request generation.
+ * Returns %0 on success or %-ENOMEM on failure.
+ */
+static int tsi721_bdma_maint_init(struct tsi721_device *priv)
{
struct tsi721_dma_desc *bd_ptr;
u64 *sts_ptr;
dma_addr_t bd_phys, sts_phys;
int sts_size;
- int bd_num = priv->bdma[chnum].bd_num;
+ int bd_num = 2;
+ void __iomem *regs;
- dev_dbg(&priv->pdev->dev, "Init Block DMA Engine, CH%d\n", chnum);
+ dev_dbg(&priv->pdev->dev,
+ "Init Block DMA Engine for Maintenance requests, CH%d\n",
+ TSI721_DMACH_MAINT);
/*
* Initialize DMA channel for maintenance requests
*/
+ priv->mdma.ch_id = TSI721_DMACH_MAINT;
+ regs = priv->regs + TSI721_DMAC_BASE(TSI721_DMACH_MAINT);
+
/* Allocate space for DMA descriptors */
bd_ptr = dma_zalloc_coherent(&priv->pdev->dev,
bd_num * sizeof(struct tsi721_dma_desc),
@@ -909,8 +976,9 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
if (!bd_ptr)
return -ENOMEM;
- priv->bdma[chnum].bd_phys = bd_phys;
- priv->bdma[chnum].bd_base = bd_ptr;
+ priv->mdma.bd_num = bd_num;
+ priv->mdma.bd_phys = bd_phys;
+ priv->mdma.bd_base = bd_ptr;
dev_dbg(&priv->pdev->dev, "DMA descriptors @ %p (phys = %llx)\n",
bd_ptr, (unsigned long long)bd_phys);
@@ -927,13 +995,13 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
dma_free_coherent(&priv->pdev->dev,
bd_num * sizeof(struct tsi721_dma_desc),
bd_ptr, bd_phys);
- priv->bdma[chnum].bd_base = NULL;
+ priv->mdma.bd_base = NULL;
return -ENOMEM;
}
- priv->bdma[chnum].sts_phys = sts_phys;
- priv->bdma[chnum].sts_base = sts_ptr;
- priv->bdma[chnum].sts_size = sts_size;
+ priv->mdma.sts_phys = sts_phys;
+ priv->mdma.sts_base = sts_ptr;
+ priv->mdma.sts_size = sts_size;
dev_dbg(&priv->pdev->dev,
"desc status FIFO @ %p (phys = %llx) size=0x%x\n",
@@ -946,83 +1014,61 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32);
/* Setup DMA descriptor pointers */
- iowrite32(((u64)bd_phys >> 32),
- priv->regs + TSI721_DMAC_DPTRH(chnum));
+ iowrite32(((u64)bd_phys >> 32), regs + TSI721_DMAC_DPTRH);
iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
- priv->regs + TSI721_DMAC_DPTRL(chnum));
+ regs + TSI721_DMAC_DPTRL);
/* Setup descriptor status FIFO */
- iowrite32(((u64)sts_phys >> 32),
- priv->regs + TSI721_DMAC_DSBH(chnum));
+ iowrite32(((u64)sts_phys >> 32), regs + TSI721_DMAC_DSBH);
iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
- priv->regs + TSI721_DMAC_DSBL(chnum));
+ regs + TSI721_DMAC_DSBL);
iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
- priv->regs + TSI721_DMAC_DSSZ(chnum));
+ regs + TSI721_DMAC_DSSZ);
/* Clear interrupt bits */
- iowrite32(TSI721_DMAC_INT_ALL,
- priv->regs + TSI721_DMAC_INT(chnum));
+ iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT);
- ioread32(priv->regs + TSI721_DMAC_INT(chnum));
+ ioread32(regs + TSI721_DMAC_INT);
/* Toggle DMA channel initialization */
- iowrite32(TSI721_DMAC_CTL_INIT, priv->regs + TSI721_DMAC_CTL(chnum));
- ioread32(priv->regs + TSI721_DMAC_CTL(chnum));
+ iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL);
+ ioread32(regs + TSI721_DMAC_CTL);
udelay(10);
return 0;
}
-static int tsi721_bdma_ch_free(struct tsi721_device *priv, int chnum)
+static int tsi721_bdma_maint_free(struct tsi721_device *priv)
{
u32 ch_stat;
+ struct tsi721_bdma_maint *mdma = &priv->mdma;
+ void __iomem *regs = priv->regs + TSI721_DMAC_BASE(mdma->ch_id);
- if (priv->bdma[chnum].bd_base == NULL)
+ if (mdma->bd_base == NULL)
return 0;
/* Check if DMA channel still running */
- ch_stat = ioread32(priv->regs + TSI721_DMAC_STS(chnum));
+ ch_stat = ioread32(regs + TSI721_DMAC_STS);
if (ch_stat & TSI721_DMAC_STS_RUN)
return -EFAULT;
/* Put DMA channel into init state */
- iowrite32(TSI721_DMAC_CTL_INIT,
- priv->regs + TSI721_DMAC_CTL(chnum));
+ iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL);
/* Free space allocated for DMA descriptors */
dma_free_coherent(&priv->pdev->dev,
- priv->bdma[chnum].bd_num * sizeof(struct tsi721_dma_desc),
- priv->bdma[chnum].bd_base, priv->bdma[chnum].bd_phys);
- priv->bdma[chnum].bd_base = NULL;
+ mdma->bd_num * sizeof(struct tsi721_dma_desc),
+ mdma->bd_base, mdma->bd_phys);
+ mdma->bd_base = NULL;
/* Free space allocated for status FIFO */
dma_free_coherent(&priv->pdev->dev,
- priv->bdma[chnum].sts_size * sizeof(struct tsi721_dma_sts),
- priv->bdma[chnum].sts_base, priv->bdma[chnum].sts_phys);
- priv->bdma[chnum].sts_base = NULL;
- return 0;
-}
-
-static int tsi721_bdma_init(struct tsi721_device *priv)
-{
- /* Initialize BDMA channel allocated for RapidIO maintenance read/write
- * request generation
- */
- priv->bdma[TSI721_DMACH_MAINT].bd_num = 2;
- if (tsi721_bdma_ch_init(priv, TSI721_DMACH_MAINT)) {
- dev_err(&priv->pdev->dev, "Unable to initialize maintenance DMA"
- " channel %d, aborting\n", TSI721_DMACH_MAINT);
- return -ENOMEM;
- }
-
+ mdma->sts_size * sizeof(struct tsi721_dma_sts),
+ mdma->sts_base, mdma->sts_phys);
+ mdma->sts_base = NULL;
return 0;
}
-static void tsi721_bdma_free(struct tsi721_device *priv)
-{
- tsi721_bdma_ch_free(priv, TSI721_DMACH_MAINT);
-}
-
/* Enable Inbound Messaging Interrupts */
static void
tsi721_imsg_interrupt_enable(struct tsi721_device *priv, int ch,
@@ -2035,7 +2081,8 @@ static void tsi721_disable_ints(struct tsi721_device *priv)
/* Disable all BDMA Channel interrupts */
for (ch = 0; ch < TSI721_DMA_MAXCH; ch++)
- iowrite32(0, priv->regs + TSI721_DMAC_INTE(ch));
+ iowrite32(0,
+ priv->regs + TSI721_DMAC_BASE(ch) + TSI721_DMAC_INTE);
/* Disable all general BDMA interrupts */
iowrite32(0, priv->regs + TSI721_BDMA_INTE);
@@ -2104,6 +2151,7 @@ static int __devinit tsi721_setup_mport(struct tsi721_device *priv)
mport->phy_type = RIO_PHY_SERIAL;
mport->priv = (void *)priv;
mport->phys_efptr = 0x100;
+ priv->mport = mport;
INIT_LIST_HEAD(&mport->dbells);
@@ -2129,17 +2177,21 @@ static int __devinit tsi721_setup_mport(struct tsi721_device *priv)
if (!err) {
tsi721_interrupts_init(priv);
ops->pwenable = tsi721_pw_enable;
- } else
+ } else {
dev_err(&pdev->dev, "Unable to get assigned PCI IRQ "
"vector %02X err=0x%x\n", pdev->irq, err);
+ goto err_exit;
+ }
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+ tsi721_register_dma(priv);
+#endif
/* Enable SRIO link */
iowrite32(ioread32(priv->regs + TSI721_DEVCTL) |
TSI721_DEVCTL_SRBOOT_CMPL,
priv->regs + TSI721_DEVCTL);
rio_register_mport(mport);
- priv->mport = mport;
if (mport->host_deviceid >= 0)
iowrite32(RIO_PORT_GEN_HOST | RIO_PORT_GEN_MASTER |
@@ -2149,6 +2201,11 @@ static int __devinit tsi721_setup_mport(struct tsi721_device *priv)
iowrite32(0, priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR));
return 0;
+
+err_exit:
+ kfree(mport);
+ kfree(ops);
+ return err;
}
static int __devinit tsi721_probe(struct pci_dev *pdev,
@@ -2294,7 +2351,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
tsi721_init_pc2sr_mapping(priv);
tsi721_init_sr2pc_mapping(priv);
- if (tsi721_bdma_init(priv)) {
+ if (tsi721_bdma_maint_init(priv)) {
dev_err(&pdev->dev, "BDMA initialization failed, aborting\n");
err = -ENOMEM;
goto err_unmap_bars;
@@ -2319,7 +2376,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
err_free_consistent:
tsi721_doorbell_free(priv);
err_free_bdma:
- tsi721_bdma_free(priv);
+ tsi721_bdma_maint_free(priv);
err_unmap_bars:
if (priv->regs)
iounmap(priv->regs);
diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h
index 1c226b31af13..59de9d7be346 100644
--- a/drivers/rapidio/devices/tsi721.h
+++ b/drivers/rapidio/devices/tsi721.h
@@ -167,6 +167,8 @@
#define TSI721_DEV_INTE 0x29840
#define TSI721_DEV_INT 0x29844
#define TSI721_DEV_INTSET 0x29848
+#define TSI721_DEV_INT_BDMA_CH 0x00002000
+#define TSI721_DEV_INT_BDMA_NCH 0x00001000
#define TSI721_DEV_INT_SMSG_CH 0x00000800
#define TSI721_DEV_INT_SMSG_NCH 0x00000400
#define TSI721_DEV_INT_SR2PC_CH 0x00000200
@@ -181,6 +183,8 @@
#define TSI721_INT_IMSG_CHAN(x) (1 << (16 + (x)))
#define TSI721_INT_OMSG_CHAN_M 0x0000ff00
#define TSI721_INT_OMSG_CHAN(x) (1 << (8 + (x)))
+#define TSI721_INT_BDMA_CHAN_M 0x000000ff
+#define TSI721_INT_BDMA_CHAN(x) (1 << (x))
/*
* PC2SR block registers
@@ -235,14 +239,16 @@
* x = 0..7
*/
-#define TSI721_DMAC_DWRCNT(x) (0x51000 + (x) * 0x1000)
-#define TSI721_DMAC_DRDCNT(x) (0x51004 + (x) * 0x1000)
+#define TSI721_DMAC_BASE(x) (0x51000 + (x) * 0x1000)
-#define TSI721_DMAC_CTL(x) (0x51008 + (x) * 0x1000)
+#define TSI721_DMAC_DWRCNT 0x000
+#define TSI721_DMAC_DRDCNT 0x004
+
+#define TSI721_DMAC_CTL 0x008
#define TSI721_DMAC_CTL_SUSP 0x00000002
#define TSI721_DMAC_CTL_INIT 0x00000001
-#define TSI721_DMAC_INT(x) (0x5100c + (x) * 0x1000)
+#define TSI721_DMAC_INT 0x00c
#define TSI721_DMAC_INT_STFULL 0x00000010
#define TSI721_DMAC_INT_DONE 0x00000008
#define TSI721_DMAC_INT_SUSP 0x00000004
@@ -250,34 +256,33 @@
#define TSI721_DMAC_INT_IOFDONE 0x00000001
#define TSI721_DMAC_INT_ALL 0x0000001f
-#define TSI721_DMAC_INTSET(x) (0x51010 + (x) * 0x1000)
+#define TSI721_DMAC_INTSET 0x010
-#define TSI721_DMAC_STS(x) (0x51014 + (x) * 0x1000)
+#define TSI721_DMAC_STS 0x014
#define TSI721_DMAC_STS_ABORT 0x00400000
#define TSI721_DMAC_STS_RUN 0x00200000
#define TSI721_DMAC_STS_CS 0x001f0000
-#define TSI721_DMAC_INTE(x) (0x51018 + (x) * 0x1000)
+#define TSI721_DMAC_INTE 0x018
-#define TSI721_DMAC_DPTRL(x) (0x51024 + (x) * 0x1000)
+#define TSI721_DMAC_DPTRL 0x024
#define TSI721_DMAC_DPTRL_MASK 0xffffffe0
-#define TSI721_DMAC_DPTRH(x) (0x51028 + (x) * 0x1000)
+#define TSI721_DMAC_DPTRH 0x028
-#define TSI721_DMAC_DSBL(x) (0x5102c + (x) * 0x1000)
+#define TSI721_DMAC_DSBL 0x02c
#define TSI721_DMAC_DSBL_MASK 0xffffffc0
-#define TSI721_DMAC_DSBH(x) (0x51030 + (x) * 0x1000)
+#define TSI721_DMAC_DSBH 0x030
-#define TSI721_DMAC_DSSZ(x) (0x51034 + (x) * 0x1000)
+#define TSI721_DMAC_DSSZ 0x034
#define TSI721_DMAC_DSSZ_SIZE_M 0x0000000f
#define TSI721_DMAC_DSSZ_SIZE(size) (__fls(size) - 4)
-
-#define TSI721_DMAC_DSRP(x) (0x51038 + (x) * 0x1000)
+#define TSI721_DMAC_DSRP 0x038
#define TSI721_DMAC_DSRP_MASK 0x0007ffff
-#define TSI721_DMAC_DSWP(x) (0x5103c + (x) * 0x1000)
+#define TSI721_DMAC_DSWP 0x03c
#define TSI721_DMAC_DSWP_MASK 0x0007ffff
#define TSI721_BDMA_INTE 0x5f000
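
The rework above trades eight families of per-channel absolute register macros for a single per-channel base plus fixed offsets, so code can compute one __iomem cursor per channel and reuse it. A short sketch of the new addressing style, mirroring what tsi721_disable_ints() does after this patch:

#include <linux/io.h>
#include "tsi721.h"

/* Quiesce one BDMA channel: mask and clear its interrupts, then put
 * it back into the init state, all relative to the channel base.
 */
static void example_dmac_quiesce(struct tsi721_device *priv, int ch)
{
	void __iomem *regs = priv->regs + TSI721_DMAC_BASE(ch);

	iowrite32(0, regs + TSI721_DMAC_INTE);			/* mask channel irqs */
	iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT);	/* clear them */
	iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL);	/* re-init */
}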
@@ -612,6 +617,8 @@ enum dma_rtype {
#define TSI721_DMACH_MAINT 0 /* DMA channel for maint requests */
#define TSI721_DMACH_MAINT_NBD 32 /* Number of BDs for maint requests */
+#define TSI721_DMACH_DMA 1 /* DMA channel for data transfers */
+
#define MSG_DMA_ENTRY_INX_TO_SIZE(x) ((0x10 << (x)) & 0xFFFF0)
enum tsi721_smsg_int_flag {
@@ -626,7 +633,48 @@ enum tsi721_smsg_int_flag {
/* Structures */
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+
+struct tsi721_tx_desc {
+ struct dma_async_tx_descriptor txd;
+ struct tsi721_dma_desc *hw_desc;
+ u16 destid;
+ /* low 64-bits of 66-bit RIO address */
+ u64 rio_addr;
+ /* upper 2-bits of 66-bit RIO address */
+ u8 rio_addr_u;
+ bool interrupt;
+ struct list_head desc_node;
+ struct list_head tx_list;
+};
+
struct tsi721_bdma_chan {
+ int id;
+ void __iomem *regs;
+ int bd_num; /* number of buffer descriptors */
+ void *bd_base; /* start of DMA descriptors */
+ dma_addr_t bd_phys;
+ void *sts_base; /* start of DMA BD status FIFO */
+ dma_addr_t sts_phys;
+ int sts_size;
+ u32 sts_rdptr;
+ u32 wr_count;
+ u32 wr_count_next;
+
+ struct dma_chan dchan;
+ struct tsi721_tx_desc *tx_desc;
+ spinlock_t lock;
+ struct list_head active_list;
+ struct list_head queue;
+ struct list_head free_list;
+ dma_cookie_t completed_cookie;
+ struct tasklet_struct tasklet;
+};
+
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
+
+struct tsi721_bdma_maint {
+ int ch_id; /* BDMA channel number */
int bd_num; /* number of buffer descriptors */
void *bd_base; /* start of DMA descriptors */
dma_addr_t bd_phys;
@@ -721,6 +769,24 @@ enum tsi721_msix_vect {
TSI721_VECT_IMB1_INT,
TSI721_VECT_IMB2_INT,
TSI721_VECT_IMB3_INT,
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+ TSI721_VECT_DMA0_DONE,
+ TSI721_VECT_DMA1_DONE,
+ TSI721_VECT_DMA2_DONE,
+ TSI721_VECT_DMA3_DONE,
+ TSI721_VECT_DMA4_DONE,
+ TSI721_VECT_DMA5_DONE,
+ TSI721_VECT_DMA6_DONE,
+ TSI721_VECT_DMA7_DONE,
+ TSI721_VECT_DMA0_INT,
+ TSI721_VECT_DMA1_INT,
+ TSI721_VECT_DMA2_INT,
+ TSI721_VECT_DMA3_INT,
+ TSI721_VECT_DMA4_INT,
+ TSI721_VECT_DMA5_INT,
+ TSI721_VECT_DMA6_INT,
+ TSI721_VECT_DMA7_INT,
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
TSI721_VECT_MAX
};
@@ -754,7 +820,11 @@ struct tsi721_device {
u32 pw_discard_count;
/* BDMA Engine */
+ struct tsi721_bdma_maint mdma; /* Maintenance rd/wr request channel */
+
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
struct tsi721_bdma_chan bdma[TSI721_DMA_CHNUM];
+#endif
/* Inbound Messaging */
int imsg_init[TSI721_IMSG_CHNUM];
@@ -765,4 +835,9 @@ struct tsi721_device {
struct tsi721_omsg_ring omsg_ring[TSI721_OMSG_CHNUM];
};
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+extern void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan);
+extern int __devinit tsi721_register_dma(struct tsi721_device *priv);
+#endif
+
#endif
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
new file mode 100644
index 000000000000..92e06a5c62ec
--- /dev/null
+++ b/drivers/rapidio/devices/tsi721_dma.c
@@ -0,0 +1,823 @@
+/*
+ * DMA Engine support for Tsi721 PCI Express-to-SRIO bridge
+ *
+ * Copyright 2011 Integrated Device Technology, Inc.
+ * Alexandre Bounine <alexandre.bounine@idt.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/rio.h>
+#include <linux/rio_drv.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+#include <linux/delay.h>
+
+#include "tsi721.h"
+
+static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct tsi721_bdma_chan, dchan);
+}
+
+static inline struct tsi721_device *to_tsi721(struct dma_device *ddev)
+{
+ return container_of(ddev, struct rio_mport, dma)->priv;
+}
+
+static inline
+struct tsi721_tx_desc *to_tsi721_desc(struct dma_async_tx_descriptor *txd)
+{
+ return container_of(txd, struct tsi721_tx_desc, txd);
+}
+
+static inline
+struct tsi721_tx_desc *tsi721_dma_first_active(
+ struct tsi721_bdma_chan *bdma_chan)
+{
+ return list_first_entry(&bdma_chan->active_list,
+ struct tsi721_tx_desc, desc_node);
+}
+
+static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan)
+{
+ struct tsi721_dma_desc *bd_ptr;
+ struct device *dev = bdma_chan->dchan.device->dev;
+ u64 *sts_ptr;
+ dma_addr_t bd_phys;
+ dma_addr_t sts_phys;
+ int sts_size;
+ int bd_num = bdma_chan->bd_num;
+
+ dev_dbg(dev, "Init Block DMA Engine, CH%d\n", bdma_chan->id);
+
+ /* Allocate space for DMA descriptors */
+ bd_ptr = dma_zalloc_coherent(dev,
+ bd_num * sizeof(struct tsi721_dma_desc),
+ &bd_phys, GFP_KERNEL);
+ if (!bd_ptr)
+ return -ENOMEM;
+
+ bdma_chan->bd_phys = bd_phys;
+ bdma_chan->bd_base = bd_ptr;
+
+ dev_dbg(dev, "DMA descriptors @ %p (phys = %llx)\n",
+ bd_ptr, (unsigned long long)bd_phys);
+
+ /* Allocate space for descriptor status FIFO */
+ sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
+ bd_num : TSI721_DMA_MINSTSSZ;
+ sts_size = roundup_pow_of_two(sts_size);
+ sts_ptr = dma_zalloc_coherent(dev,
+ sts_size * sizeof(struct tsi721_dma_sts),
+ &sts_phys, GFP_KERNEL);
+ if (!sts_ptr) {
+ /* Free space allocated for DMA descriptors */
+ dma_free_coherent(dev,
+ bd_num * sizeof(struct tsi721_dma_desc),
+ bd_ptr, bd_phys);
+ bdma_chan->bd_base = NULL;
+ return -ENOMEM;
+ }
+
+ bdma_chan->sts_phys = sts_phys;
+ bdma_chan->sts_base = sts_ptr;
+ bdma_chan->sts_size = sts_size;
+
+ dev_dbg(dev,
+ "desc status FIFO @ %p (phys = %llx) size=0x%x\n",
+ sts_ptr, (unsigned long long)sts_phys, sts_size);
+
+ /* Initialize DMA descriptors ring */
+ bd_ptr[bd_num - 1].type_id = cpu_to_le32(DTYPE3 << 29);
+ bd_ptr[bd_num - 1].next_lo = cpu_to_le32((u64)bd_phys &
+ TSI721_DMAC_DPTRL_MASK);
+ bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32);
+
+ /* Setup DMA descriptor pointers */
+ iowrite32(((u64)bd_phys >> 32),
+ bdma_chan->regs + TSI721_DMAC_DPTRH);
+ iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
+ bdma_chan->regs + TSI721_DMAC_DPTRL);
+
+ /* Setup descriptor status FIFO */
+ iowrite32(((u64)sts_phys >> 32),
+ bdma_chan->regs + TSI721_DMAC_DSBH);
+ iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
+ bdma_chan->regs + TSI721_DMAC_DSBL);
+ iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
+ bdma_chan->regs + TSI721_DMAC_DSSZ);
+
+ /* Clear interrupt bits */
+ iowrite32(TSI721_DMAC_INT_ALL,
+ bdma_chan->regs + TSI721_DMAC_INT);
+
+ ioread32(bdma_chan->regs + TSI721_DMAC_INT);
+
+ /* Toggle DMA channel initialization */
+ iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);
+ ioread32(bdma_chan->regs + TSI721_DMAC_CTL);
+ bdma_chan->wr_count = bdma_chan->wr_count_next = 0;
+ bdma_chan->sts_rdptr = 0;
+ udelay(10);
+
+ return 0;
+}
+
+static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan)
+{
+ u32 ch_stat;
+
+ if (bdma_chan->bd_base == NULL)
+ return 0;
+
+ /* Check if DMA channel still running */
+ ch_stat = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
+ if (ch_stat & TSI721_DMAC_STS_RUN)
+ return -EFAULT;
+
+ /* Put DMA channel into init state */
+ iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);
+
+ /* Free space allocated for DMA descriptors */
+ dma_free_coherent(bdma_chan->dchan.device->dev,
+ bdma_chan->bd_num * sizeof(struct tsi721_dma_desc),
+ bdma_chan->bd_base, bdma_chan->bd_phys);
+ bdma_chan->bd_base = NULL;
+
+ /* Free space allocated for status FIFO */
+ dma_free_coherent(bdma_chan->dchan.device->dev,
+ bdma_chan->sts_size * sizeof(struct tsi721_dma_sts),
+ bdma_chan->sts_base, bdma_chan->sts_phys);
+ bdma_chan->sts_base = NULL;
+ return 0;
+}
+
+static void
+tsi721_bdma_interrupt_enable(struct tsi721_bdma_chan *bdma_chan, int enable)
+{
+ if (enable) {
+ /* Clear pending BDMA channel interrupts */
+ iowrite32(TSI721_DMAC_INT_ALL,
+ bdma_chan->regs + TSI721_DMAC_INT);
+ ioread32(bdma_chan->regs + TSI721_DMAC_INT);
+ /* Enable BDMA channel interrupts */
+ iowrite32(TSI721_DMAC_INT_ALL,
+ bdma_chan->regs + TSI721_DMAC_INTE);
+ } else {
+ /* Disable BDMA channel interrupts */
+ iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
+ /* Clear pending BDMA channel interrupts */
+ iowrite32(TSI721_DMAC_INT_ALL,
+ bdma_chan->regs + TSI721_DMAC_INT);
+ }
+}
+
+static bool tsi721_dma_is_idle(struct tsi721_bdma_chan *bdma_chan)
+{
+ u32 sts;
+
+ sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
+ return ((sts & TSI721_DMAC_STS_RUN) == 0);
+}
+
+void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
+{
+ /* Disable BDMA channel interrupts */
+ iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
+
+ tasklet_schedule(&bdma_chan->tasklet);
+}
+
+#ifdef CONFIG_PCI_MSI
+/**
+ * tsi721_bdma_msix - MSI-X interrupt handler for BDMA channels
+ * @irq: Linux interrupt number
+ * @ptr: Pointer to interrupt-specific data (BDMA channel structure)
+ *
+ * Handles BDMA channel interrupts signaled using MSI-X.
+ */
+static irqreturn_t tsi721_bdma_msix(int irq, void *ptr)
+{
+ struct tsi721_bdma_chan *bdma_chan = ptr;
+
+ tsi721_bdma_handler(bdma_chan);
+ return IRQ_HANDLED;
+}
+#endif /* CONFIG_PCI_MSI */
+
+/* Must be called with the spinlock held */
+static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan)
+{
+ if (!tsi721_dma_is_idle(bdma_chan)) {
+ dev_err(bdma_chan->dchan.device->dev,
+ "BUG: Attempt to start non-idle channel\n");
+ return;
+ }
+
+ if (bdma_chan->wr_count == bdma_chan->wr_count_next) {
+ dev_err(bdma_chan->dchan.device->dev,
+ "BUG: Attempt to start DMA with no BDs ready\n");
+ return;
+ }
+
+ dev_dbg(bdma_chan->dchan.device->dev,
+ "tx_chan: %p, chan: %d, regs: %p\n",
+ bdma_chan, bdma_chan->dchan.chan_id, bdma_chan->regs);
+
+ iowrite32(bdma_chan->wr_count_next,
+ bdma_chan->regs + TSI721_DMAC_DWRCNT);
+ ioread32(bdma_chan->regs + TSI721_DMAC_DWRCNT);
+
+ bdma_chan->wr_count = bdma_chan->wr_count_next;
+}
+
+static void tsi721_desc_put(struct tsi721_bdma_chan *bdma_chan,
+ struct tsi721_tx_desc *desc)
+{
+ dev_dbg(bdma_chan->dchan.device->dev,
+ "Put desc: %p into free list\n", desc);
+
+ if (desc) {
+ spin_lock_bh(&bdma_chan->lock);
+ list_splice_init(&desc->tx_list, &bdma_chan->free_list);
+ list_add(&desc->desc_node, &bdma_chan->free_list);
+ bdma_chan->wr_count_next = bdma_chan->wr_count;
+ spin_unlock_bh(&bdma_chan->lock);
+ }
+}
+
+static
+struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
+{
+ struct tsi721_tx_desc *tx_desc, *_tx_desc;
+ struct tsi721_tx_desc *ret = NULL;
+ int i;
+
+ spin_lock_bh(&bdma_chan->lock);
+ list_for_each_entry_safe(tx_desc, _tx_desc,
+ &bdma_chan->free_list, desc_node) {
+ if (async_tx_test_ack(&tx_desc->txd)) {
+ list_del(&tx_desc->desc_node);
+ ret = tx_desc;
+ break;
+ }
+ dev_dbg(bdma_chan->dchan.device->dev,
+ "desc %p not ACKed\n", tx_desc);
+ }
+
+ i = bdma_chan->wr_count_next % bdma_chan->bd_num;
+ if (i == bdma_chan->bd_num - 1) {
+ i = 0;
+ bdma_chan->wr_count_next++; /* skip link descriptor */
+ }
+
+ bdma_chan->wr_count_next++;
+ tx_desc->txd.phys = bdma_chan->bd_phys +
+ i * sizeof(struct tsi721_dma_desc);
+ tx_desc->hw_desc = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[i];
+
+ spin_unlock_bh(&bdma_chan->lock);
+
+ return ret;
+}
+
+static int
+tsi721_fill_desc(struct tsi721_bdma_chan *bdma_chan,
+ struct tsi721_tx_desc *desc, struct scatterlist *sg,
+ enum dma_rtype rtype, u32 sys_size)
+{
+ struct tsi721_dma_desc *bd_ptr = desc->hw_desc;
+ u64 rio_addr;
+
+ if (sg_dma_len(sg) > TSI721_DMAD_BCOUNT1 + 1) {
+ dev_err(bdma_chan->dchan.device->dev,
+ "SG element is too large\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(bdma_chan->dchan.device->dev,
+ "desc: 0x%llx, addr: 0x%llx len: 0x%x\n",
+ (u64)desc->txd.phys, (unsigned long long)sg_dma_address(sg),
+ sg_dma_len(sg));
+
+ dev_dbg(bdma_chan->dchan.device->dev,
+ "bd_ptr = %p did=%d raddr=0x%llx\n",
+ bd_ptr, desc->destid, desc->rio_addr);
+
+ /* Initialize DMA descriptor */
+ bd_ptr->type_id = cpu_to_le32((DTYPE1 << 29) |
+ (rtype << 19) | desc->destid);
+ if (desc->interrupt)
+ bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF);
+ bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) |
+ (sys_size << 26) | sg_dma_len(sg));
+ rio_addr = (desc->rio_addr >> 2) |
+ ((u64)(desc->rio_addr_u & 0x3) << 62);
+ bd_ptr->raddr_lo = cpu_to_le32(rio_addr & 0xffffffff);
+ bd_ptr->raddr_hi = cpu_to_le32(rio_addr >> 32);
+ bd_ptr->t1.bufptr_lo = cpu_to_le32(
+ (u64)sg_dma_address(sg) & 0xffffffff);
+ bd_ptr->t1.bufptr_hi = cpu_to_le32((u64)sg_dma_address(sg) >> 32);
+ bd_ptr->t1.s_dist = 0;
+ bd_ptr->t1.s_size = 0;
+
+ return 0;
+}
+
+static void tsi721_dma_chain_complete(struct tsi721_bdma_chan *bdma_chan,
+ struct tsi721_tx_desc *desc)
+{
+ struct dma_async_tx_descriptor *txd = &desc->txd;
+ dma_async_tx_callback callback = txd->callback;
+ void *param = txd->callback_param;
+
+ list_splice_init(&desc->tx_list, &bdma_chan->free_list);
+ list_move(&desc->desc_node, &bdma_chan->free_list);
+ bdma_chan->completed_cookie = txd->cookie;
+
+ if (callback)
+ callback(param);
+}
+
+static void tsi721_dma_complete_all(struct tsi721_bdma_chan *bdma_chan)
+{
+ struct tsi721_tx_desc *desc, *_d;
+ LIST_HEAD(list);
+
+ BUG_ON(!tsi721_dma_is_idle(bdma_chan));
+
+ if (!list_empty(&bdma_chan->queue))
+ tsi721_start_dma(bdma_chan);
+
+ list_splice_init(&bdma_chan->active_list, &list);
+ list_splice_init(&bdma_chan->queue, &bdma_chan->active_list);
+
+ list_for_each_entry_safe(desc, _d, &list, desc_node)
+ tsi721_dma_chain_complete(bdma_chan, desc);
+}
+
+static void tsi721_clr_stat(struct tsi721_bdma_chan *bdma_chan)
+{
+ u32 srd_ptr;
+ u64 *sts_ptr;
+ int i, j;
+
+ /* Check and clear descriptor status FIFO entries */
+ srd_ptr = bdma_chan->sts_rdptr;
+ sts_ptr = bdma_chan->sts_base;
+ j = srd_ptr * 8;
+ while (sts_ptr[j]) {
+ for (i = 0; i < 8 && sts_ptr[j]; i++, j++)
+ sts_ptr[j] = 0;
+
+ ++srd_ptr;
+ srd_ptr %= bdma_chan->sts_size;
+ j = srd_ptr * 8;
+ }
+
+ iowrite32(srd_ptr, bdma_chan->regs + TSI721_DMAC_DSRP);
+ bdma_chan->sts_rdptr = srd_ptr;
+}
+
+static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan)
+{
+ if (list_empty(&bdma_chan->active_list) ||
+ list_is_singular(&bdma_chan->active_list)) {
+ dev_dbg(bdma_chan->dchan.device->dev,
+ "%s: Active_list empty\n", __func__);
+ tsi721_dma_complete_all(bdma_chan);
+ } else {
+ dev_dbg(bdma_chan->dchan.device->dev,
+ "%s: Active_list NOT empty\n", __func__);
+ tsi721_dma_chain_complete(bdma_chan,
+ tsi721_dma_first_active(bdma_chan));
+ tsi721_start_dma(bdma_chan);
+ }
+}
+
+static void tsi721_dma_tasklet(unsigned long data)
+{
+ struct tsi721_bdma_chan *bdma_chan = (struct tsi721_bdma_chan *)data;
+ u32 dmac_int, dmac_sts;
+
+ dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
+ dev_dbg(bdma_chan->dchan.device->dev, "%s: DMAC%d_INT = 0x%x\n",
+ __func__, bdma_chan->id, dmac_int);
+ /* Clear channel interrupts */
+ iowrite32(dmac_int, bdma_chan->regs + TSI721_DMAC_INT);
+
+ if (dmac_int & TSI721_DMAC_INT_ERR) {
+ dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
+ dev_err(bdma_chan->dchan.device->dev,
+ "%s: DMA ERROR - DMAC%d_STS = 0x%x\n",
+ __func__, bdma_chan->id, dmac_sts);
+ }
+
+ if (dmac_int & TSI721_DMAC_INT_STFULL) {
+ dev_err(bdma_chan->dchan.device->dev,
+ "%s: DMAC%d descriptor status FIFO is full\n",
+ __func__, bdma_chan->id);
+ }
+
+ if (dmac_int & (TSI721_DMAC_INT_DONE | TSI721_DMAC_INT_IOFDONE)) {
+ tsi721_clr_stat(bdma_chan);
+ spin_lock(&bdma_chan->lock);
+ tsi721_advance_work(bdma_chan);
+ spin_unlock(&bdma_chan->lock);
+ }
+
+ /* Re-Enable BDMA channel interrupts */
+ iowrite32(TSI721_DMAC_INT_ALL, bdma_chan->regs + TSI721_DMAC_INTE);
+}
+
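
A note on the locking convention used throughout this file: the tasklet above already runs in softirq context, so it takes the plain spin_lock(); the process-context entry points (tx_submit below, issue_pending, the prep and alloc paths) use the _bh variants so the tasklet cannot run on the same CPU while they hold the lock. In sketch form:

#include <linux/spinlock.h>

static void from_tasklet_context(spinlock_t *lock)
{
	spin_lock(lock);	/* BHs are already disabled here */
	/* ... advance the descriptor lists ... */
	spin_unlock(lock);
}

static void from_process_context(spinlock_t *lock)
{
	spin_lock_bh(lock);	/* keep the tasklet out while we queue work */
	/* ... queue a new descriptor ... */
	spin_unlock_bh(lock);
}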
+static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd)
+{
+ struct tsi721_tx_desc *desc = to_tsi721_desc(txd);
+ struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(txd->chan);
+ dma_cookie_t cookie;
+
+ spin_lock_bh(&bdma_chan->lock);
+
+ cookie = txd->chan->cookie;
+ if (++cookie < 0)
+ cookie = 1;
+ txd->chan->cookie = cookie;
+ txd->cookie = cookie;
+
+ if (list_empty(&bdma_chan->active_list)) {
+ list_add_tail(&desc->desc_node, &bdma_chan->active_list);
+ tsi721_start_dma(bdma_chan);
+ } else {
+ list_add_tail(&desc->desc_node, &bdma_chan->queue);
+ }
+
+ spin_unlock_bh(&bdma_chan->lock);
+ return cookie;
+}
+
+static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
+{
+ struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+#ifdef CONFIG_PCI_MSI
+ struct tsi721_device *priv = to_tsi721(dchan->device);
+#endif
+ struct tsi721_tx_desc *desc = NULL;
+ LIST_HEAD(tmp_list);
+ int i;
+ int rc;
+
+ if (bdma_chan->bd_base)
+ return bdma_chan->bd_num - 1;
+
+ /* Initialize BDMA channel */
+ if (tsi721_bdma_ch_init(bdma_chan)) {
+ dev_err(dchan->device->dev, "Unable to initialize data DMA"
+ " channel %d, aborting\n", bdma_chan->id);
+ return -ENOMEM;
+ }
+
+ /* Allocate a matching number of logical descriptors */
+ desc = kcalloc((bdma_chan->bd_num - 1), sizeof(struct tsi721_tx_desc),
+ GFP_KERNEL);
+ if (!desc) {
+ dev_err(dchan->device->dev,
+ "Failed to allocate logical descriptors\n");
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ bdma_chan->tx_desc = desc;
+
+ for (i = 0; i < bdma_chan->bd_num - 1; i++) {
+ dma_async_tx_descriptor_init(&desc[i].txd, dchan);
+ desc[i].txd.tx_submit = tsi721_tx_submit;
+ desc[i].txd.flags = DMA_CTRL_ACK;
+ INIT_LIST_HEAD(&desc[i].tx_list);
+ list_add_tail(&desc[i].desc_node, &tmp_list);
+ }
+
+ spin_lock_bh(&bdma_chan->lock);
+ list_splice(&tmp_list, &bdma_chan->free_list);
+ bdma_chan->completed_cookie = dchan->cookie = 1;
+ spin_unlock_bh(&bdma_chan->lock);
+
+#ifdef CONFIG_PCI_MSI
+ if (priv->flags & TSI721_USING_MSIX) {
+ /* Request interrupt service if we are in MSI-X mode */
+ rc = request_irq(
+ priv->msix[TSI721_VECT_DMA0_DONE +
+ bdma_chan->id].vector,
+ tsi721_bdma_msix, 0,
+ priv->msix[TSI721_VECT_DMA0_DONE +
+ bdma_chan->id].irq_name,
+ (void *)bdma_chan);
+
+ if (rc) {
+ dev_dbg(dchan->device->dev,
+ "Unable to allocate MSI-X interrupt for "
+ "BDMA%d-DONE\n", bdma_chan->id);
+ goto err_out;
+ }
+
+ rc = request_irq(priv->msix[TSI721_VECT_DMA0_INT +
+ bdma_chan->id].vector,
+ tsi721_bdma_msix, 0,
+ priv->msix[TSI721_VECT_DMA0_INT +
+ bdma_chan->id].irq_name,
+ (void *)bdma_chan);
+
+ if (rc) {
+ dev_dbg(dchan->device->dev,
+ "Unable to allocate MSI-X interrupt for "
+ "BDMA%d-INT\n", bdma_chan->id);
+ free_irq(
+ priv->msix[TSI721_VECT_DMA0_DONE +
+ bdma_chan->id].vector,
+ (void *)bdma_chan);
+ rc = -EIO;
+ goto err_out;
+ }
+ }
+#endif /* CONFIG_PCI_MSI */
+
+ tasklet_enable(&bdma_chan->tasklet);
+ tsi721_bdma_interrupt_enable(bdma_chan, 1);
+
+ return bdma_chan->bd_num - 1;
+
+err_out:
+ kfree(desc);
+ tsi721_bdma_ch_free(bdma_chan);
+ return rc;
+}
+
+static void tsi721_free_chan_resources(struct dma_chan *dchan)
+{
+ struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+#ifdef CONFIG_PCI_MSI
+ struct tsi721_device *priv = to_tsi721(dchan->device);
+#endif
+ LIST_HEAD(list);
+
+ dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
+
+ if (bdma_chan->bd_base == NULL)
+ return;
+
+ BUG_ON(!list_empty(&bdma_chan->active_list));
+ BUG_ON(!list_empty(&bdma_chan->queue));
+
+ tasklet_disable(&bdma_chan->tasklet);
+
+ spin_lock_bh(&bdma_chan->lock);
+ list_splice_init(&bdma_chan->free_list, &list);
+ spin_unlock_bh(&bdma_chan->lock);
+
+ tsi721_bdma_interrupt_enable(bdma_chan, 0);
+
+#ifdef CONFIG_PCI_MSI
+ if (priv->flags & TSI721_USING_MSIX) {
+ free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
+ bdma_chan->id].vector, (void *)bdma_chan);
+ free_irq(priv->msix[TSI721_VECT_DMA0_INT +
+ bdma_chan->id].vector, (void *)bdma_chan);
+ }
+#endif /* CONFIG_PCI_MSI */
+
+ tsi721_bdma_ch_free(bdma_chan);
+ kfree(bdma_chan->tx_desc);
+}
+
+static
+enum dma_status tsi721_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+ dma_cookie_t last_used;
+ dma_cookie_t last_completed;
+ int ret;
+
+ spin_lock_bh(&bdma_chan->lock);
+ last_completed = bdma_chan->completed_cookie;
+ last_used = dchan->cookie;
+ spin_unlock_bh(&bdma_chan->lock);
+
+ ret = dma_async_is_complete(cookie, last_completed, last_used);
+
+ dma_set_tx_state(txstate, last_completed, last_used, 0);
+
+ dev_dbg(dchan->device->dev,
+ "%s: exit, ret: %d, last_completed: %d, last_used: %d\n",
+ __func__, ret, last_completed, last_used);
+
+ return ret;
+}
+
+static void tsi721_issue_pending(struct dma_chan *dchan)
+{
+ struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+
+ dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
+
+ if (tsi721_dma_is_idle(bdma_chan)) {
+ spin_lock_bh(&bdma_chan->lock);
+ tsi721_advance_work(bdma_chan);
+ spin_unlock_bh(&bdma_chan->lock);
+ } else
+ dev_dbg(dchan->device->dev,
+ "%s: DMA channel still busy\n", __func__);
+}
+
+static
+struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
+ struct scatterlist *sgl, unsigned int sg_len,
+ enum dma_transfer_direction dir, unsigned long flags,
+ void *tinfo)
+{
+ struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+ struct tsi721_tx_desc *desc = NULL;
+ struct tsi721_tx_desc *first = NULL;
+ struct scatterlist *sg;
+ struct rio_dma_ext *rext = tinfo;
+ u64 rio_addr = rext->rio_addr; /* limited to 64-bit rio_addr for now */
+ unsigned int i;
+ u32 sys_size = dma_to_mport(dchan->device)->sys_size;
+ enum dma_rtype rtype;
+
+ if (!sgl || !sg_len) {
+ dev_err(dchan->device->dev, "%s: No SG list\n", __func__);
+ return NULL;
+ }
+
+ if (dir == DMA_DEV_TO_MEM)
+ rtype = NREAD;
+ else if (dir == DMA_MEM_TO_DEV) {
+ switch (rext->wr_type) {
+ case RDW_ALL_NWRITE:
+ rtype = ALL_NWRITE;
+ break;
+ case RDW_ALL_NWRITE_R:
+ rtype = ALL_NWRITE_R;
+ break;
+ case RDW_LAST_NWRITE_R:
+ default:
+ rtype = LAST_NWRITE_R;
+ break;
+ }
+ } else {
+ dev_err(dchan->device->dev,
+ "%s: Unsupported DMA direction option\n", __func__);
+ return NULL;
+ }
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ int err;
+
+ dev_dbg(dchan->device->dev, "%s: sg #%d\n", __func__, i);
+ desc = tsi721_desc_get(bdma_chan);
+ if (!desc) {
+ dev_err(dchan->device->dev,
+ "Not enough descriptors available\n");
+ goto err_desc_get;
+ }
+
+ if (sg_is_last(sg))
+ desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;
+ else
+ desc->interrupt = false;
+
+ desc->destid = rext->destid;
+ desc->rio_addr = rio_addr;
+ desc->rio_addr_u = 0;
+
+ err = tsi721_fill_desc(bdma_chan, desc, sg, rtype, sys_size);
+ if (err) {
+ dev_err(dchan->device->dev,
+ "Failed to build desc: %d\n", err);
+ goto err_desc_get;
+ }
+
+ rio_addr += sg_dma_len(sg);
+
+ if (!first)
+ first = desc;
+ else
+ list_add_tail(&desc->desc_node, &first->tx_list);
+ }
+
+ first->txd.cookie = -EBUSY;
+ desc->txd.flags = flags;
+
+ return &first->txd;
+
+err_desc_get:
+ tsi721_desc_put(bdma_chan, first);
+ return NULL;
+}
+
+static int tsi721_device_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+ struct tsi721_tx_desc *desc, *_d;
+ LIST_HEAD(list);
+
+ dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
+
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+
+ spin_lock_bh(&bdma_chan->lock);
+
+ /* make sure to stop the transfer */
+ iowrite32(TSI721_DMAC_CTL_SUSP, bdma_chan->regs + TSI721_DMAC_CTL);
+
+ list_splice_init(&bdma_chan->active_list, &list);
+ list_splice_init(&bdma_chan->queue, &list);
+
+ list_for_each_entry_safe(desc, _d, &list, desc_node)
+ tsi721_dma_chain_complete(bdma_chan, desc);
+
+ spin_unlock_bh(&bdma_chan->lock);
+
+ return 0;
+}
+
+int __devinit tsi721_register_dma(struct tsi721_device *priv)
+{
+ int i;
+ int nr_channels = TSI721_DMA_MAXCH;
+ int err;
+ struct rio_mport *mport = priv->mport;
+
+ mport->dma.dev = &priv->pdev->dev;
+ mport->dma.chancnt = nr_channels;
+
+ INIT_LIST_HEAD(&mport->dma.channels);
+
+ for (i = 0; i < nr_channels; i++) {
+ struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i];
+
+ if (i == TSI721_DMACH_MAINT)
+ continue;
+
+ bdma_chan->bd_num = 64;
+ bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i);
+
+ bdma_chan->dchan.device = &mport->dma;
+ bdma_chan->dchan.cookie = 1;
+ bdma_chan->dchan.chan_id = i;
+ bdma_chan->id = i;
+
+ spin_lock_init(&bdma_chan->lock);
+
+ INIT_LIST_HEAD(&bdma_chan->active_list);
+ INIT_LIST_HEAD(&bdma_chan->queue);
+ INIT_LIST_HEAD(&bdma_chan->free_list);
+
+ tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet,
+ (unsigned long)bdma_chan);
+ tasklet_disable(&bdma_chan->tasklet);
+ list_add_tail(&bdma_chan->dchan.device_node,
+ &mport->dma.channels);
+ }
+
+ dma_cap_zero(mport->dma.cap_mask);
+ dma_cap_set(DMA_PRIVATE, mport->dma.cap_mask);
+ dma_cap_set(DMA_SLAVE, mport->dma.cap_mask);
+
+ mport->dma.device_alloc_chan_resources = tsi721_alloc_chan_resources;
+ mport->dma.device_free_chan_resources = tsi721_free_chan_resources;
+ mport->dma.device_tx_status = tsi721_tx_status;
+ mport->dma.device_issue_pending = tsi721_issue_pending;
+ mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg;
+ mport->dma.device_control = tsi721_device_control;
+
+ err = dma_async_device_register(&mport->dma);
+ if (err)
+ dev_err(&priv->pdev->dev, "Failed to register DMA device\n");
+
+ return err;
+}
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index 86c9a091a2ff..c40665a4fa33 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -1121,6 +1121,87 @@ int rio_std_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
return 0;
}
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+
+static bool rio_chan_filter(struct dma_chan *chan, void *arg)
+{
+ struct rio_dev *rdev = arg;
+
+ /* Check that DMA device belongs to the right MPORT */
+ return (rdev->net->hport ==
+ container_of(chan->device, struct rio_mport, dma));
+}
+
+/**
+ * rio_request_dma - request RapidIO capable DMA channel that supports
+ * specified target RapidIO device.
+ * @rdev: RIO device control structure
+ *
+ * Returns pointer to allocated DMA channel or NULL if failed.
+ */
+struct dma_chan *rio_request_dma(struct rio_dev *rdev)
+{
+ dma_cap_mask_t mask;
+ struct dma_chan *dchan;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ dchan = dma_request_channel(mask, rio_chan_filter, rdev);
+
+ return dchan;
+}
+EXPORT_SYMBOL_GPL(rio_request_dma);
+
+/**
+ * rio_release_dma - release specified DMA channel
+ * @dchan: DMA channel to release
+ */
+void rio_release_dma(struct dma_chan *dchan)
+{
+ dma_release_channel(dchan);
+}
+EXPORT_SYMBOL_GPL(rio_release_dma);
+
+/**
+ * rio_dma_prep_slave_sg - RapidIO specific wrapper
+ * for device_prep_slave_sg callback defined by DMAENGINE.
+ * @rdev: RIO device control structure
+ * @dchan: DMA channel to configure
+ * @data: RIO specific data descriptor
+ * @direction: DMA data transfer direction (TO or FROM the device)
+ * @flags: dmaengine defined flags
+ *
+ * Initializes RapidIO capable DMA channel for the specified data transfer.
+ * Uses DMA channel private extension to pass information related to remote
+ * target RIO device.
+ * Returns pointer to DMA transaction descriptor or NULL if failed.
+ */
+struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(struct rio_dev *rdev,
+ struct dma_chan *dchan, struct rio_dma_data *data,
+ enum dma_transfer_direction direction, unsigned long flags)
+{
+ struct dma_async_tx_descriptor *txd = NULL;
+ struct rio_dma_ext rio_ext;
+
+ if (dchan->device->device_prep_slave_sg == NULL) {
+ pr_err("%s: prep_rio_sg == NULL\n", __func__);
+ return NULL;
+ }
+
+ rio_ext.destid = rdev->destid;
+ rio_ext.rio_addr_u = data->rio_addr_u;
+ rio_ext.rio_addr = data->rio_addr;
+ rio_ext.wr_type = data->wr_type;
+
+ txd = dmaengine_prep_rio_sg(dchan, data->sg, data->sg_len,
+ direction, flags, &rio_ext);
+
+ return txd;
+}
+EXPORT_SYMBOL_GPL(rio_dma_prep_slave_sg);
+
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
+
static void rio_fixup_device(struct rio_dev *dev)
{
}