author     Rob Herring <rob.herring@calxeda.com>   2013-11-07 10:34:46 -0600
committer  Rob Herring <rob.herring@calxeda.com>   2013-11-07 10:34:46 -0600
commit     b5480950c6cbb7b07ab1c1a5af0dc661a1cb6f24 (patch)
tree       b5fcb00387a838beb2bcf2f8ed2fd3d6d460c8ae /drivers/dma
parent     e363bbac316ffb5daaf45d855f82680148cafe20 (diff)
parent     355e62f5ad12b005c862838156262eb2df2f8dff (diff)

Merge remote-tracking branch 'grant/devicetree/next' into for-next
Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/Kconfig            1
-rw-r--r--  drivers/dma/edma.c             3
-rw-r--r--  drivers/dma/imx-dma.c         31
-rw-r--r--  drivers/dma/sh/rcar-hpbdma.c   9
4 files changed, 23 insertions, 21 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 526ec77c7ba0..f238cfd33847 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -198,6 +198,7 @@ config TI_EDMA
depends on ARCH_DAVINCI || ARCH_OMAP
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
+ select TI_PRIV_EDMA
default n
help
Enable support for the TI EDMA controller. This DMA
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index ff50ff4c6a57..3519111c566b 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -306,6 +306,7 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
EDMA_SLOT_ANY);
if (echan->slot[i] < 0) {
dev_err(dev, "Failed to allocate slot\n");
+ kfree(edesc);
return NULL;
}
}
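
The kfree(edesc) added above plugs a memory leak in edma_prep_slave_sg(): when a paRAM slot cannot be allocated, the function previously returned NULL and dropped the descriptor it had already allocated. A minimal sketch of the pattern follows; all names (demo_prep, demo_alloc_slot) are stand-ins, not the driver's own.

/* Minimal sketch of the leak-fix pattern: once the descriptor is
 * allocated, every error return must free it. */
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct demo_desc {
    int slot;
};

/* Stand-in for the paRAM slot allocator used above; returns a
 * negative errno on failure. */
static int demo_alloc_slot(void)
{
    return -ENOMEM;
}

static struct demo_desc *demo_prep(struct device *dev)
{
    struct demo_desc *d = kzalloc(sizeof(*d), GFP_ATOMIC);

    if (!d)
        return NULL;

    d->slot = demo_alloc_slot();
    if (d->slot < 0) {
        dev_err(dev, "Failed to allocate slot\n");
        kfree(d);    /* without this, the descriptor leaks */
        return NULL;
    }
    return d;
}
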
@@ -749,6 +750,6 @@ static void __exit edma_exit(void)
}
module_exit(edma_exit);
-MODULE_AUTHOR("Matt Porter <mporter@ti.com>");
+MODULE_AUTHOR("Matt Porter <matt.porter@linaro.org>");
MODULE_DESCRIPTION("TI EDMA DMA engine driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 78f8ca5fccee..55852c026791 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -437,17 +437,18 @@ static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
struct imxdma_engine *imxdma = imxdmac->imxdma;
int chno = imxdmac->channel;
struct imxdma_desc *desc;
+ unsigned long flags;
- spin_lock(&imxdma->lock);
+ spin_lock_irqsave(&imxdma->lock, flags);
if (list_empty(&imxdmac->ld_active)) {
- spin_unlock(&imxdma->lock);
+ spin_unlock_irqrestore(&imxdma->lock, flags);
goto out;
}
desc = list_first_entry(&imxdmac->ld_active,
struct imxdma_desc,
node);
- spin_unlock(&imxdma->lock);
+ spin_unlock_irqrestore(&imxdma->lock, flags);
if (desc->sg) {
u32 tmp;
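
dma_irq_handle_channel() switches from spin_lock() to spin_lock_irqsave(), so the lock is taken with local interrupts disabled and the saved state restored afterwards; this keeps imxdma->lock safe to take from both interrupt and non-interrupt paths. A self-contained sketch of the pattern, using stand-in types (demo_chan, demo_desc) rather than imx-dma's:

/* Sketch of the spin_lock_irqsave() pattern; structure and field
 * names are stand-ins. */
#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_desc {
    struct list_head node;
};

struct demo_chan {
    spinlock_t lock;             /* protects the active list */
    struct list_head active;
};

static struct demo_desc *demo_first_active(struct demo_chan *c)
{
    struct demo_desc *d = NULL;
    unsigned long flags;

    /* Save and disable local interrupts so the lock may be taken
     * from both IRQ and non-IRQ context without deadlocking. */
    spin_lock_irqsave(&c->lock, flags);
    if (!list_empty(&c->active))
        d = list_first_entry(&c->active, struct demo_desc, node);
    spin_unlock_irqrestore(&c->lock, flags);

    return d;
}
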
@@ -519,7 +520,6 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
{
struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
struct imxdma_engine *imxdma = imxdmac->imxdma;
- unsigned long flags;
int slot = -1;
int i;
@@ -527,7 +527,6 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
switch (d->type) {
case IMXDMA_DESC_INTERLEAVED:
/* Try to get a free 2D slot */
- spin_lock_irqsave(&imxdma->lock, flags);
for (i = 0; i < IMX_DMA_2D_SLOTS; i++) {
if ((imxdma->slots_2d[i].count > 0) &&
((imxdma->slots_2d[i].xsr != d->x) ||
@@ -537,10 +536,8 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
slot = i;
break;
}
- if (slot < 0) {
- spin_unlock_irqrestore(&imxdma->lock, flags);
+ if (slot < 0)
return -EBUSY;
- }
imxdma->slots_2d[slot].xsr = d->x;
imxdma->slots_2d[slot].ysr = d->y;
@@ -549,7 +546,6 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
imxdmac->slot_2d = slot;
imxdmac->enabled_2d = true;
- spin_unlock_irqrestore(&imxdma->lock, flags);
if (slot == IMX_DMA_2D_SLOT_A) {
d->config_mem &= ~CCR_MSEL_B;
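
The hunks above drop the lock/unlock pair around the 2D-slot search in imxdma_xfer_desc(), presumably because its callers now enter it with imxdma->lock already held, so re-taking the lock here would deadlock. One common way to document such a "caller holds the lock" contract is lockdep_assert_held(); the sketch below uses stand-in names (demo_claim_2d_slot) and is not the driver's code.

/* Sketch of documenting a "caller must hold the lock" contract with
 * lockdep_assert_held(); names are illustrative only. */
#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_engine {
    spinlock_t lock;
    bool slot_2d_busy;
};

/* Must be called with eng->lock held by the caller. */
static int demo_claim_2d_slot(struct demo_engine *eng)
{
    lockdep_assert_held(&eng->lock);

    if (eng->slot_2d_busy)
        return -EBUSY;

    eng->slot_2d_busy = true;
    return 0;
}
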
@@ -625,18 +621,17 @@ static void imxdma_tasklet(unsigned long data)
struct imxdma_channel *imxdmac = (void *)data;
struct imxdma_engine *imxdma = imxdmac->imxdma;
struct imxdma_desc *desc;
+ unsigned long flags;
- spin_lock(&imxdma->lock);
+ spin_lock_irqsave(&imxdma->lock, flags);
if (list_empty(&imxdmac->ld_active)) {
/* Someone might have called terminate all */
- goto out;
+ spin_unlock_irqrestore(&imxdma->lock, flags);
+ return;
}
desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);
- if (desc->desc.callback)
- desc->desc.callback(desc->desc.callback_param);
-
/* If we are dealing with a cyclic descriptor, keep it on ld_active
* and dont mark the descriptor as complete.
* Only in non-cyclic cases it would be marked as complete
@@ -663,7 +658,11 @@ static void imxdma_tasklet(unsigned long data)
__func__, imxdmac->channel);
}
out:
- spin_unlock(&imxdma->lock);
+ spin_unlock_irqrestore(&imxdma->lock, flags);
+
+ if (desc->desc.callback)
+ desc->desc.callback(desc->desc.callback_param);
+
}
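
The tasklet now takes the lock with spin_lock_irqsave() and, more importantly, invokes the completion callback only after the lock has been released, so a client callback that immediately submits another transfer cannot deadlock against the driver's own locking. A sketch of the "snapshot under the lock, call back outside it" pattern, with stand-in types:

/* Sketch of running a DMA completion callback only after the channel
 * lock has been dropped; types and names are stand-ins. */
#include <linux/spinlock.h>

typedef void (*demo_callback_t)(void *param);

struct demo_chan {
    spinlock_t lock;
    demo_callback_t callback;
    void *callback_param;
};

static void demo_complete(struct demo_chan *c)
{
    demo_callback_t cb;
    void *param;
    unsigned long flags;

    /* Snapshot the callback while the channel state is protected... */
    spin_lock_irqsave(&c->lock, flags);
    cb = c->callback;
    param = c->callback_param;
    spin_unlock_irqrestore(&c->lock, flags);

    /* ...then invoke it with no locks held, so it may safely call
     * back into the driver (e.g. to submit the next transfer). */
    if (cb)
        cb(param);
}
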
static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
@@ -883,7 +882,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
kfree(imxdmac->sg_list);
imxdmac->sg_list = kcalloc(periods + 1,
- sizeof(struct scatterlist), GFP_KERNEL);
+ sizeof(struct scatterlist), GFP_ATOMIC);
if (!imxdmac->sg_list)
return NULL;
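
The GFP_KERNEL to GFP_ATOMIC change reflects that dmaengine prep callbacks such as device_prep_dma_cyclic() may be invoked from atomic context, where a sleeping allocation is not allowed. A short sketch, with an illustrative helper name (demo_alloc_sg_list):

/* Sketch: allocating with GFP_ATOMIC in a path that may run in atomic
 * context. */
#include <linux/scatterlist.h>
#include <linux/slab.h>

static struct scatterlist *demo_alloc_sg_list(unsigned int periods)
{
    /*
     * GFP_KERNEL may sleep while reclaiming memory; GFP_ATOMIC fails
     * fast instead, which is the only safe choice if the caller holds
     * a spinlock or runs in interrupt context.
     */
    return kcalloc(periods + 1, sizeof(struct scatterlist), GFP_ATOMIC);
}
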
diff --git a/drivers/dma/sh/rcar-hpbdma.c b/drivers/dma/sh/rcar-hpbdma.c
index 45a520281ce1..ebad84591a6e 100644
--- a/drivers/dma/sh/rcar-hpbdma.c
+++ b/drivers/dma/sh/rcar-hpbdma.c
@@ -93,6 +93,7 @@ struct hpb_dmae_chan {
void __iomem *base;
const struct hpb_dmae_slave_config *cfg;
char dev_id[16]; /* unique name per DMAC of channel */
+ dma_addr_t slave_addr;
};
struct hpb_dmae_device {
@@ -432,7 +433,6 @@ hpb_dmae_alloc_chan_resources(struct hpb_dmae_chan *hpb_chan,
hpb_chan->xfer_mode = XFER_DOUBLE;
} else {
dev_err(hpb_chan->shdma_chan.dev, "DCR setting error");
- shdma_free_irq(&hpb_chan->shdma_chan);
return -EINVAL;
}
@@ -446,7 +446,8 @@ hpb_dmae_alloc_chan_resources(struct hpb_dmae_chan *hpb_chan,
return 0;
}
-static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id, bool try)
+static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id,
+ dma_addr_t slave_addr, bool try)
{
struct hpb_dmae_chan *chan = to_chan(schan);
const struct hpb_dmae_slave_config *sc =
@@ -457,6 +458,7 @@ static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id, bool try)
if (try)
return 0;
chan->cfg = sc;
+ chan->slave_addr = slave_addr ? : sc->addr;
return hpb_dmae_alloc_chan_resources(chan, sc);
}
@@ -468,7 +470,7 @@ static dma_addr_t hpb_dmae_slave_addr(struct shdma_chan *schan)
{
struct hpb_dmae_chan *chan = to_chan(schan);
- return chan->cfg->addr;
+ return chan->slave_addr;
}
static struct shdma_desc *hpb_dmae_embedded_desc(void *buf, int i)
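
rcar-hpbdma now matches the shdma core's set_slave prototype, which passes a slave address down to the driver. The driver caches that address, falling back to the static per-slave configuration when none is supplied, and hpb_dmae_slave_addr() reports the cached value instead of reading cfg->addr. A sketch of the "cache at configure time, report at transfer time" idea, using stand-in types (demo_chan, demo_slave_config):

/* Sketch of caching a per-channel slave address at configure time and
 * reporting it at transfer time; types are stand-ins for the
 * shdma/hpb structures. */
#include <linux/types.h>

struct demo_slave_config {
    dma_addr_t addr;             /* static, board-described address */
};

struct demo_chan {
    const struct demo_slave_config *cfg;
    dma_addr_t slave_addr;       /* address transfers will actually use */
};

static void demo_set_slave(struct demo_chan *chan,
                           const struct demo_slave_config *cfg,
                           dma_addr_t slave_addr)
{
    chan->cfg = cfg;
    /* Prefer an address handed down by the core, falling back to the
     * static configuration when none was supplied. */
    chan->slave_addr = slave_addr ? : cfg->addr;
}

static dma_addr_t demo_slave_addr(const struct demo_chan *chan)
{
    return chan->slave_addr;
}
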
@@ -614,7 +616,6 @@ static void hpb_dmae_chan_remove(struct hpb_dmae_device *hpbdev)
shdma_for_each_chan(schan, &hpbdev->shdma_dev, i) {
BUG_ON(!schan);
- shdma_free_irq(schan);
shdma_chan_remove(schan);
}
dma_dev->chancnt = 0;
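
The removed shdma_free_irq() calls in the error and remove paths suggest the channel IRQs are now released elsewhere, for example through managed (devm) allocation; that is an assumption, since the IRQ request side is not shown in this diff. Under that assumption, a sketch of a devm-managed IRQ request, with illustrative names (demo_irq, demo_request_irq):

/* Sketch of a devm-managed IRQ request: the IRQ is released
 * automatically when the device is unbound, so error and remove paths
 * need no explicit free.  Names are illustrative. */
#include <linux/device.h>
#include <linux/interrupt.h>

static irqreturn_t demo_irq(int irq, void *data)
{
    return IRQ_HANDLED;
}

static int demo_request_irq(struct device *dev, unsigned int irq,
                            void *chan)
{
    return devm_request_irq(dev, irq, demo_irq, 0, dev_name(dev), chan);
}
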