From c3635c78e500a52c9fcd55de381a72928d9e054d Mon Sep 17 00:00:00 2001
From: Linus Walleij
Date: Fri, 26 Mar 2010 16:44:01 -0700
Subject: DMAENGINE: generic slave control v2

Convert the device_terminate_all() operation on the DMA engine to a
generic device_control() operation which can now optionally also support
pausing and resuming DMA on a certain channel. Implemented for the
COH 901 318 DMAC as an example.

[dan.j.williams@intel.com: update for timberdale]
Signed-off-by: Linus Walleij
Acked-by: Mark Brown
Cc: Maciej Sosnowski
Cc: Nicolas Ferre
Cc: Pavel Machek
Cc: Li Yang
Cc: Guennadi Liakhovetski
Cc: Paul Mundt
Cc: Ralf Baechle
Cc: Haavard Skinnemoen
Cc: Magnus Damm
Cc: Liam Girdwood
Cc: Joe Perches
Cc: Roland Dreier
Signed-off-by: Dan Williams
---
 drivers/dma/dmaengine.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/dma/dmaengine.c')

diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 87399cafce37..ffc4ee9c5e21 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -694,7 +694,7 @@ int dma_async_device_register(struct dma_device *device)
 	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
 		!device->device_prep_slave_sg);
 	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
-		!device->device_terminate_all);
+		!device->device_control);
 
 	BUG_ON(!device->device_alloc_chan_resources);
 	BUG_ON(!device->device_free_chan_resources);
-- cgit v1.2.3

From 0793448187643b50af89d36b08470baf45a3cab4 Mon Sep 17 00:00:00 2001
From: Linus Walleij
Date: Fri, 26 Mar 2010 16:50:49 -0700
Subject: DMAENGINE: generic channel status v2

Convert the device_is_tx_complete() operation on the DMA engine to a
generic device_tx_status() operation which can return three states,
DMA_TX_RUNNING, DMA_TX_COMPLETE, DMA_TX_PAUSED.
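[Illustration, not part of the patch: a minimal client-side sketch of how the two new hooks could be exercised together. The command name DMA_PAUSE and the helper function below are assumptions made for the example; only device_control(), device_tx_status(), struct dma_tx_state and DMA_PAUSED come from the patches themselves.]

#include <linux/kernel.h>
#include <linux/dmaengine.h>

/* Hypothetical helper: pause a slave channel, then poll the transfer it
 * was running.  DMA_PAUSE is an assumed enum dma_ctrl_cmd value. */
static int example_pause_and_query(struct dma_chan *chan, dma_cookie_t cookie)
{
        struct dma_tx_state state;
        enum dma_status status;
        int ret;

        /* one generic control hook instead of device_terminate_all() */
        ret = chan->device->device_control(chan, DMA_PAUSE);
        if (ret)
                return ret;

        /* one generic status hook instead of device_is_tx_complete() */
        status = chan->device->device_tx_status(chan, cookie, &state);
        if (status == DMA_PAUSED)
                pr_info("paused, %u bytes still pending\n", state.residue);

        return 0;
}

In the conversions below, drivers that cannot compute a residue simply report 0, while coh901318 fills in the real number of bytes left.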
[dan.j.williams@intel.com: update for timberdale] Signed-off-by: Linus Walleij Acked-by: Mark Brown Cc: Maciej Sosnowski Cc: Nicolas Ferre Cc: Pavel Machek Cc: Li Yang Cc: Guennadi Liakhovetski Cc: Paul Mundt Cc: Ralf Baechle Cc: Haavard Skinnemoen Cc: Magnus Damm Cc: Liam Girdwood Cc: Joe Perches Cc: Roland Dreier Signed-off-by: Dan Williams --- arch/arm/mach-u300/include/mach/coh901318.h | 7 ----- drivers/dma/at_hdmac.c | 29 ++++++++++--------- drivers/dma/coh901318.c | 25 ++++++++-------- drivers/dma/dmaengine.c | 2 +- drivers/dma/dw_dmac.c | 17 +++++------ drivers/dma/fsldma.c | 19 ++++++------- drivers/dma/ioat/dma.c | 12 ++++---- drivers/dma/ioat/dma.h | 22 +++++++-------- drivers/dma/ioat/dma_v2.c | 2 +- drivers/dma/ioat/dma_v3.c | 20 ++++++------- drivers/dma/iop-adma.c | 44 +++++++++++++++-------------- drivers/dma/ipu/ipu_idmac.c | 15 +++++----- drivers/dma/mpc512x_dma.c | 16 +++++------ drivers/dma/mv_xor.c | 32 +++++++++++---------- drivers/dma/ppc4xx/adma.c | 27 ++++++++++-------- drivers/dma/shdma.c | 17 ++++++----- drivers/dma/timb_dma.c | 15 +++++----- drivers/dma/txx9dmac.c | 16 +++++------ include/linux/dmaengine.h | 38 +++++++++++++++++++++---- 19 files changed, 203 insertions(+), 172 deletions(-) (limited to 'drivers/dma/dmaengine.c') diff --git a/arch/arm/mach-u300/include/mach/coh901318.h b/arch/arm/mach-u300/include/mach/coh901318.h index 43ec040e765b..193da2df732c 100644 --- a/arch/arm/mach-u300/include/mach/coh901318.h +++ b/arch/arm/mach-u300/include/mach/coh901318.h @@ -102,13 +102,6 @@ struct coh901318_platform { const int max_channels; }; -/** - * coh901318_get_bytes_left() - Get number of bytes left on a current transfer - * @chan: dma channel handle - * return number of bytes left, or negative on error - */ -u32 coh901318_get_bytes_left(struct dma_chan *chan); - /** * coh901318_filter_id() - DMA channel filter function * @chan: dma channel handle diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index f9143cf9e50a..ff75cf18d32e 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -798,29 +798,25 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd) } /** - * atc_is_tx_complete - poll for transaction completion + * atc_tx_status - poll for transaction completion * @chan: DMA channel * @cookie: transaction identifier to check status of - * @done: if not %NULL, updated with last completed transaction - * @used: if not %NULL, updated with last used transaction + * @txstate: if not %NULL updated with transaction state * - * If @done and @used are passed in, upon return they reflect the driver + * If @txstate is passed in, upon return it reflect the driver * internal state and can be used with dma_async_is_complete() to check * the status of multiple cookies without re-checking hardware state. */ static enum dma_status -atc_is_tx_complete(struct dma_chan *chan, +atc_tx_status(struct dma_chan *chan, dma_cookie_t cookie, - dma_cookie_t *done, dma_cookie_t *used) + struct dma_tx_state *txstate) { struct at_dma_chan *atchan = to_at_dma_chan(chan); dma_cookie_t last_used; dma_cookie_t last_complete; enum dma_status ret; - dev_vdbg(chan2dev(chan), "is_tx_complete: %d (d%d, u%d)\n", - cookie, done ? *done : 0, used ? 
*used : 0); - spin_lock_bh(&atchan->lock); last_complete = atchan->completed_cookie; @@ -838,10 +834,15 @@ atc_is_tx_complete(struct dma_chan *chan, spin_unlock_bh(&atchan->lock); - if (done) - *done = last_complete; - if (used) - *used = last_used; + if (txstate) { + txstate->last = last_complete; + txstate->used = last_used; + txstate->residue = 0; + } + + dev_vdbg(chan2dev(chan), "tx_status: %d (d%d, u%d)\n", + cookie, last_complete ? last_complete : 0, + last_used ? last_used : 0); return ret; } @@ -1087,7 +1088,7 @@ static int __init at_dma_probe(struct platform_device *pdev) /* set base routines */ atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources; atdma->dma_common.device_free_chan_resources = atc_free_chan_resources; - atdma->dma_common.device_is_tx_complete = atc_is_tx_complete; + atdma->dma_common.device_tx_status = atc_tx_status; atdma->dma_common.device_issue_pending = atc_issue_pending; atdma->dma_common.dev = &pdev->dev; diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index 53c54e034aa3..309db3beef16 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c @@ -426,7 +426,7 @@ static inline u32 coh901318_get_bytes_in_lli(struct coh901318_lli *in_lli) * absolute measures, but for a rough guess you can still call * it. */ -u32 coh901318_get_bytes_left(struct dma_chan *chan) +static u32 coh901318_get_bytes_left(struct dma_chan *chan) { struct coh901318_chan *cohc = to_coh901318_chan(chan); struct coh901318_desc *cohd; @@ -503,8 +503,6 @@ u32 coh901318_get_bytes_left(struct dma_chan *chan) return left; } -EXPORT_SYMBOL(coh901318_get_bytes_left); - /* * Pauses a transfer without losing data. Enables power save. @@ -1136,9 +1134,8 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, } static enum dma_status -coh901318_is_tx_complete(struct dma_chan *chan, - dma_cookie_t cookie, dma_cookie_t *done, - dma_cookie_t *used) +coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie, + struct dma_tx_state *txstate) { struct coh901318_chan *cohc = to_coh901318_chan(chan); dma_cookie_t last_used; @@ -1150,10 +1147,14 @@ coh901318_is_tx_complete(struct dma_chan *chan, ret = dma_async_is_complete(cookie, last_complete, last_used); - if (done) - *done = last_complete; - if (used) - *used = last_used; + if (txstate) { + txstate->last = last_complete; + txstate->used = last_used; + txstate->residue = coh901318_get_bytes_left(chan); + } + + if (ret == DMA_IN_PROGRESS && cohc->stopped) + ret = DMA_PAUSED; return ret; } @@ -1356,7 +1357,7 @@ static int __init coh901318_probe(struct platform_device *pdev) base->dma_slave.device_alloc_chan_resources = coh901318_alloc_chan_resources; base->dma_slave.device_free_chan_resources = coh901318_free_chan_resources; base->dma_slave.device_prep_slave_sg = coh901318_prep_slave_sg; - base->dma_slave.device_is_tx_complete = coh901318_is_tx_complete; + base->dma_slave.device_tx_status = coh901318_tx_status; base->dma_slave.device_issue_pending = coh901318_issue_pending; base->dma_slave.device_control = coh901318_control; base->dma_slave.dev = &pdev->dev; @@ -1376,7 +1377,7 @@ static int __init coh901318_probe(struct platform_device *pdev) base->dma_memcpy.device_alloc_chan_resources = coh901318_alloc_chan_resources; base->dma_memcpy.device_free_chan_resources = coh901318_free_chan_resources; base->dma_memcpy.device_prep_dma_memcpy = coh901318_prep_memcpy; - base->dma_memcpy.device_is_tx_complete = coh901318_is_tx_complete; + base->dma_memcpy.device_tx_status = coh901318_tx_status; 
base->dma_memcpy.device_issue_pending = coh901318_issue_pending; base->dma_memcpy.device_control = coh901318_control; base->dma_memcpy.dev = &pdev->dev; diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index ffc4ee9c5e21..790caeeb4ccd 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -698,7 +698,7 @@ int dma_async_device_register(struct dma_device *device) BUG_ON(!device->device_alloc_chan_resources); BUG_ON(!device->device_free_chan_resources); - BUG_ON(!device->device_is_tx_complete); + BUG_ON(!device->device_tx_status); BUG_ON(!device->device_issue_pending); BUG_ON(!device->dev); diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index 8a6b85f61176..263b70ee8562 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -819,9 +819,9 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd) } static enum dma_status -dwc_is_tx_complete(struct dma_chan *chan, - dma_cookie_t cookie, - dma_cookie_t *done, dma_cookie_t *used) +dwc_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, + struct dma_tx_state *txstate) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); dma_cookie_t last_used; @@ -841,10 +841,11 @@ dwc_is_tx_complete(struct dma_chan *chan, ret = dma_async_is_complete(cookie, last_complete, last_used); } - if (done) - *done = last_complete; - if (used) - *used = last_used; + if (txstate) { + txstate->last = last_complete; + txstate->used = last_used; + txstate->residue = 0; + } return ret; } @@ -1346,7 +1347,7 @@ static int __init dw_probe(struct platform_device *pdev) dw->dma.device_prep_slave_sg = dwc_prep_slave_sg; dw->dma.device_control = dwc_control; - dw->dma.device_is_tx_complete = dwc_is_tx_complete; + dw->dma.device_tx_status = dwc_tx_status; dw->dma.device_issue_pending = dwc_issue_pending; dma_writel(dw, CFG, DW_CFG_DMA_EN); diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 714fc46e7695..ca5e8a3dce72 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -971,13 +971,12 @@ static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan) } /** - * fsl_dma_is_complete - Determine the DMA status + * fsl_tx_status - Determine the DMA status * @chan : Freescale DMA channel */ -static enum dma_status fsl_dma_is_complete(struct dma_chan *dchan, +static enum dma_status fsl_tx_status(struct dma_chan *dchan, dma_cookie_t cookie, - dma_cookie_t *done, - dma_cookie_t *used) + struct dma_tx_state *txstate) { struct fsldma_chan *chan = to_fsl_chan(dchan); dma_cookie_t last_used; @@ -988,11 +987,11 @@ static enum dma_status fsl_dma_is_complete(struct dma_chan *dchan, last_used = dchan->cookie; last_complete = chan->completed_cookie; - if (done) - *done = last_complete; - - if (used) - *used = last_used; + if (txstate) { + txstate->last = last_complete; + txstate->used = last_used; + txstate->residue = 0; + } return dma_async_is_complete(cookie, last_complete, last_used); } @@ -1336,7 +1335,7 @@ static int __devinit fsldma_of_probe(struct of_device *op, fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources; fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt; fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy; - fdev->common.device_is_tx_complete = fsl_dma_is_complete; + fdev->common.device_tx_status = fsl_tx_status; fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending; fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg; fdev->common.device_control = fsl_dma_device_control; diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 
0099340b9616..59cebbfc89ec 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -726,18 +726,18 @@ static void ioat1_timer_event(unsigned long data) } enum dma_status -ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie, - dma_cookie_t *done, dma_cookie_t *used) +ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, + struct dma_tx_state *txstate) { struct ioat_chan_common *chan = to_chan_common(c); struct ioatdma_device *device = chan->device; - if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS) + if (ioat_tx_status(c, cookie, txstate) == DMA_SUCCESS) return DMA_SUCCESS; device->cleanup_fn((unsigned long) c); - return ioat_is_complete(c, cookie, done, used); + return ioat_tx_status(c, cookie, txstate); } static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat) @@ -857,7 +857,7 @@ int __devinit ioat_dma_self_test(struct ioatdma_device *device) tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); if (tmo == 0 || - dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) + dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { dev_err(dev, "Self-test copy timed out, disabling\n"); err = -ENODEV; @@ -1198,7 +1198,7 @@ int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca) dma->device_issue_pending = ioat1_dma_memcpy_issue_pending; dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources; dma->device_free_chan_resources = ioat1_dma_free_chan_resources; - dma->device_is_tx_complete = ioat_is_dma_complete; + dma->device_tx_status = ioat_dma_tx_status; err = ioat_probe(device); if (err) diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 86b97ac8774e..23399672239e 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -142,15 +142,14 @@ static inline struct ioat_dma_chan *to_ioat_chan(struct dma_chan *c) } /** - * ioat_is_complete - poll the status of an ioat transaction + * ioat_tx_status - poll the status of an ioat transaction * @c: channel handle * @cookie: transaction identifier - * @done: if set, updated with last completed transaction - * @used: if set, updated with last used transaction + * @txstate: if set, updated with the transaction state */ static inline enum dma_status -ioat_is_complete(struct dma_chan *c, dma_cookie_t cookie, - dma_cookie_t *done, dma_cookie_t *used) +ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie, + struct dma_tx_state *txstate) { struct ioat_chan_common *chan = to_chan_common(c); dma_cookie_t last_used; @@ -159,10 +158,11 @@ ioat_is_complete(struct dma_chan *c, dma_cookie_t cookie, last_used = c->cookie; last_complete = chan->completed_cookie; - if (done) - *done = last_complete; - if (used) - *used = last_used; + if (txstate) { + txstate->last = last_complete; + txstate->used = last_used; + txstate->residue = 0; + } return dma_async_is_complete(cookie, last_complete, last_used); } @@ -338,8 +338,8 @@ struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev, unsigned long ioat_get_current_completion(struct ioat_chan_common *chan); void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *chan, int idx); -enum dma_status ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie, - dma_cookie_t *done, dma_cookie_t *used); +enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, + struct dma_tx_state *txstate); void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, size_t len, struct ioat_dma_descriptor *hw); bool ioat_cleanup_preamble(struct ioat_chan_common 
*chan, diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index 1ed5d66d7dca..f540e0be7f31 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c @@ -854,7 +854,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca) dma->device_issue_pending = ioat2_issue_pending; dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; dma->device_free_chan_resources = ioat2_free_chan_resources; - dma->device_is_tx_complete = ioat_is_dma_complete; + dma->device_tx_status = ioat_tx_status; err = ioat_probe(device); if (err) diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index 26febc56dab1..d1adbf35268c 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c @@ -438,17 +438,17 @@ static void ioat3_timer_event(unsigned long data) } static enum dma_status -ioat3_is_complete(struct dma_chan *c, dma_cookie_t cookie, - dma_cookie_t *done, dma_cookie_t *used) +ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie, + struct dma_tx_state *txstate) { struct ioat2_dma_chan *ioat = to_ioat2_chan(c); - if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS) + if (ioat_tx_status(c, cookie, txstate) == DMA_SUCCESS) return DMA_SUCCESS; ioat3_cleanup_poll(ioat); - return ioat_is_complete(c, cookie, done, used); + return ioat_tx_status(c, cookie, txstate); } static struct dma_async_tx_descriptor * @@ -976,7 +976,7 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device) tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); - if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { + if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { dev_err(dev, "Self-test xor timed out\n"); err = -ENODEV; goto free_resources; @@ -1030,7 +1030,7 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device) tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); - if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { + if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { dev_err(dev, "Self-test validate timed out\n"); err = -ENODEV; goto free_resources; @@ -1071,7 +1071,7 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device) tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); - if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { + if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { dev_err(dev, "Self-test memset timed out\n"); err = -ENODEV; goto free_resources; @@ -1114,7 +1114,7 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device) tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); - if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { + if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { dev_err(dev, "Self-test 2nd validate timed out\n"); err = -ENODEV; goto free_resources; @@ -1258,11 +1258,11 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca) if (is_raid_device) { - dma->device_is_tx_complete = ioat3_is_complete; + dma->device_tx_status = ioat3_tx_status; device->cleanup_fn = ioat3_cleanup_event; device->timer_fn = ioat3_timer_event; } else { - dma->device_is_tx_complete = ioat_is_dma_complete; + dma->device_tx_status = ioat_dma_tx_status; device->cleanup_fn = ioat2_cleanup_event; device->timer_fn = ioat2_timer_event; } diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index ca6e6a0cb793..ee40dbba1879 100644 --- 
a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c @@ -893,14 +893,14 @@ static void iop_adma_free_chan_resources(struct dma_chan *chan) } /** - * iop_adma_is_complete - poll the status of an ADMA transaction + * iop_adma_status - poll the status of an ADMA transaction * @chan: ADMA channel handle * @cookie: ADMA transaction identifier + * @txstate: a holder for the current state of the channel or NULL */ -static enum dma_status iop_adma_is_complete(struct dma_chan *chan, +static enum dma_status iop_adma_status(struct dma_chan *chan, dma_cookie_t cookie, - dma_cookie_t *done, - dma_cookie_t *used) + struct dma_tx_state *txstate) { struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); dma_cookie_t last_used; @@ -910,10 +910,11 @@ static enum dma_status iop_adma_is_complete(struct dma_chan *chan, last_used = chan->cookie; last_complete = iop_chan->completed_cookie; - if (done) - *done = last_complete; - if (used) - *used = last_used; + if (txstate) { + txstate->last = last_complete; + txstate->used = last_used; + txstate->residue = 0; + } ret = dma_async_is_complete(cookie, last_complete, last_used); if (ret == DMA_SUCCESS) @@ -924,10 +925,11 @@ static enum dma_status iop_adma_is_complete(struct dma_chan *chan, last_used = chan->cookie; last_complete = iop_chan->completed_cookie; - if (done) - *done = last_complete; - if (used) - *used = last_used; + if (txstate) { + txstate->last = last_complete; + txstate->used = last_used; + txstate->residue = 0; + } return dma_async_is_complete(cookie, last_complete, last_used); } @@ -1042,7 +1044,7 @@ static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device) iop_adma_issue_pending(dma_chan); msleep(1); - if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != + if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { dev_printk(KERN_ERR, dma_chan->device->dev, "Self-test copy timed out, disabling\n"); @@ -1142,7 +1144,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device) iop_adma_issue_pending(dma_chan); msleep(8); - if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != + if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { dev_printk(KERN_ERR, dma_chan->device->dev, "Self-test xor timed out, disabling\n"); @@ -1189,7 +1191,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device) iop_adma_issue_pending(dma_chan); msleep(8); - if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { + if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { dev_printk(KERN_ERR, dma_chan->device->dev, "Self-test zero sum timed out, disabling\n"); err = -ENODEV; @@ -1213,7 +1215,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device) iop_adma_issue_pending(dma_chan); msleep(8); - if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { + if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { dev_printk(KERN_ERR, dma_chan->device->dev, "Self-test memset timed out, disabling\n"); err = -ENODEV; @@ -1245,7 +1247,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device) iop_adma_issue_pending(dma_chan); msleep(8); - if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { + if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { dev_printk(KERN_ERR, dma_chan->device->dev, "Self-test non-zero sum timed out, disabling\n"); err = -ENODEV; @@ -1340,7 +1342,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device) iop_adma_issue_pending(dma_chan); msleep(8); - if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != + if 
(iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { dev_err(dev, "Self-test pq timed out, disabling\n"); err = -ENODEV; @@ -1377,7 +1379,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device) iop_adma_issue_pending(dma_chan); msleep(8); - if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != + if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n"); err = -ENODEV; @@ -1409,7 +1411,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device) iop_adma_issue_pending(dma_chan); msleep(8); - if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != + if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n"); err = -ENODEV; @@ -1507,7 +1509,7 @@ static int __devinit iop_adma_probe(struct platform_device *pdev) /* set base routines */ dma_dev->device_alloc_chan_resources = iop_adma_alloc_chan_resources; dma_dev->device_free_chan_resources = iop_adma_free_chan_resources; - dma_dev->device_is_tx_complete = iop_adma_is_complete; + dma_dev->device_tx_status = iop_adma_status; dma_dev->device_issue_pending = iop_adma_issue_pending; dma_dev->dev = &pdev->dev; diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index 39e7fb2a90e3..b9cef8b1701c 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c @@ -1646,15 +1646,16 @@ static void idmac_free_chan_resources(struct dma_chan *chan) tasklet_schedule(&to_ipu(idmac)->tasklet); } -static enum dma_status idmac_is_tx_complete(struct dma_chan *chan, - dma_cookie_t cookie, dma_cookie_t *done, dma_cookie_t *used) +static enum dma_status idmac_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, struct dma_tx_state *txstate) { struct idmac_channel *ichan = to_idmac_chan(chan); - if (done) - *done = ichan->completed; - if (used) - *used = chan->cookie; + if (txstate) { + txstate->last = ichan->completed; + txstate->used = chan->cookie; + txstate->residue = 0; + } if (cookie != chan->cookie) return DMA_ERROR; return DMA_SUCCESS; @@ -1673,7 +1674,7 @@ static int __init ipu_idmac_init(struct ipu *ipu) dma->dev = ipu->dev; dma->device_alloc_chan_resources = idmac_alloc_chan_resources; dma->device_free_chan_resources = idmac_free_chan_resources; - dma->device_is_tx_complete = idmac_is_tx_complete; + dma->device_tx_status = idmac_tx_status; dma->device_issue_pending = idmac_issue_pending; /* Compulsory for DMA_SLAVE fields */ diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c index 3fdf1f46bd63..cb3a8e94ea48 100644 --- a/drivers/dma/mpc512x_dma.c +++ b/drivers/dma/mpc512x_dma.c @@ -540,8 +540,8 @@ static void mpc_dma_issue_pending(struct dma_chan *chan) /* Check request completion status */ static enum dma_status -mpc_dma_is_tx_complete(struct dma_chan *chan, dma_cookie_t cookie, - dma_cookie_t *done, dma_cookie_t *used) +mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie, + struct dma_tx_state *txstate) { struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); unsigned long flags; @@ -553,11 +553,11 @@ mpc_dma_is_tx_complete(struct dma_chan *chan, dma_cookie_t cookie, last_complete = mchan->completed_cookie; spin_unlock_irqrestore(&mchan->lock, flags); - if (done) - *done = last_complete; - - if (used) - *used = last_used; + if (txstate) { + txstate->last = last_complete; + txstate->used = last_used; + txstate->residue = 0; + } return dma_async_is_complete(cookie, last_complete, last_used); } @@ -693,7 +693,7 @@ static int __devinit 
mpc_dma_probe(struct of_device *op, dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources; dma->device_free_chan_resources = mpc_dma_free_chan_resources; dma->device_issue_pending = mpc_dma_issue_pending; - dma->device_is_tx_complete = mpc_dma_is_tx_complete; + dma->device_tx_status = mpc_dma_tx_status; dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy; INIT_LIST_HEAD(&dma->channels); diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 466ab10c1ff1..79fb1dea691b 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -809,14 +809,14 @@ static void mv_xor_free_chan_resources(struct dma_chan *chan) } /** - * mv_xor_is_complete - poll the status of an XOR transaction + * mv_xor_status - poll the status of an XOR transaction * @chan: XOR channel handle * @cookie: XOR transaction identifier + * @txstate: XOR transactions state holder (or NULL) */ -static enum dma_status mv_xor_is_complete(struct dma_chan *chan, +static enum dma_status mv_xor_status(struct dma_chan *chan, dma_cookie_t cookie, - dma_cookie_t *done, - dma_cookie_t *used) + struct dma_tx_state *txstate) { struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); dma_cookie_t last_used; @@ -826,10 +826,11 @@ static enum dma_status mv_xor_is_complete(struct dma_chan *chan, last_used = chan->cookie; last_complete = mv_chan->completed_cookie; mv_chan->is_complete_cookie = cookie; - if (done) - *done = last_complete; - if (used) - *used = last_used; + if (txstate) { + txstate->last = last_complete; + txstate->used = last_used; + txstate->residue = 0; + } ret = dma_async_is_complete(cookie, last_complete, last_used); if (ret == DMA_SUCCESS) { @@ -841,10 +842,11 @@ static enum dma_status mv_xor_is_complete(struct dma_chan *chan, last_used = chan->cookie; last_complete = mv_chan->completed_cookie; - if (done) - *done = last_complete; - if (used) - *used = last_used; + if (txstate) { + txstate->last = last_complete; + txstate->used = last_used; + txstate->residue = 0; + } return dma_async_is_complete(cookie, last_complete, last_used); } @@ -974,7 +976,7 @@ static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device) async_tx_ack(tx); msleep(1); - if (mv_xor_is_complete(dma_chan, cookie, NULL, NULL) != + if (mv_xor_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { dev_printk(KERN_ERR, dma_chan->device->dev, "Self-test copy timed out, disabling\n"); @@ -1072,7 +1074,7 @@ mv_xor_xor_self_test(struct mv_xor_device *device) async_tx_ack(tx); msleep(8); - if (mv_xor_is_complete(dma_chan, cookie, NULL, NULL) != + if (mv_xor_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { dev_printk(KERN_ERR, dma_chan->device->dev, "Self-test xor timed out, disabling\n"); @@ -1167,7 +1169,7 @@ static int __devinit mv_xor_probe(struct platform_device *pdev) /* set base routines */ dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources; dma_dev->device_free_chan_resources = mv_xor_free_chan_resources; - dma_dev->device_is_tx_complete = mv_xor_is_complete; + dma_dev->device_tx_status = mv_xor_status; dma_dev->device_issue_pending = mv_xor_issue_pending; dma_dev->dev = &pdev->dev; diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c index e69d87f24a25..d9a54c018652 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c @@ -3934,12 +3934,13 @@ static void ppc440spe_adma_free_chan_resources(struct dma_chan *chan) } /** - * ppc440spe_adma_is_complete - poll the status of an ADMA transaction + * ppc440spe_adma_tx_status - poll the status of an ADMA transaction * @chan: ADMA channel handle * 
@cookie: ADMA transaction identifier + * @txstate: a holder for the current state of the channel */ -static enum dma_status ppc440spe_adma_is_complete(struct dma_chan *chan, - dma_cookie_t cookie, dma_cookie_t *done, dma_cookie_t *used) +static enum dma_status ppc440spe_adma_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, struct dma_tx_state *txstate) { struct ppc440spe_adma_chan *ppc440spe_chan; dma_cookie_t last_used; @@ -3950,10 +3951,11 @@ static enum dma_status ppc440spe_adma_is_complete(struct dma_chan *chan, last_used = chan->cookie; last_complete = ppc440spe_chan->completed_cookie; - if (done) - *done = last_complete; - if (used) - *used = last_used; + if (txstate) { + txstate->last = last_complete; + txstate->used = last_used; + txstate->residue = 0; + } ret = dma_async_is_complete(cookie, last_complete, last_used); if (ret == DMA_SUCCESS) @@ -3964,10 +3966,11 @@ static enum dma_status ppc440spe_adma_is_complete(struct dma_chan *chan, last_used = chan->cookie; last_complete = ppc440spe_chan->completed_cookie; - if (done) - *done = last_complete; - if (used) - *used = last_used; + if (txstate) { + txstate->last = last_complete; + txstate->used = last_used; + txstate->residue = 0; + } return dma_async_is_complete(cookie, last_complete, last_used); } @@ -4179,7 +4182,7 @@ static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev) ppc440spe_adma_alloc_chan_resources; adev->common.device_free_chan_resources = ppc440spe_adma_free_chan_resources; - adev->common.device_is_tx_complete = ppc440spe_adma_is_complete; + adev->common.device_tx_status = ppc440spe_adma_tx_status; adev->common.device_issue_pending = ppc440spe_adma_issue_pending; /* Set prep routines based on capability */ diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c index ce28c1e22825..8aeda9ceb225 100644 --- a/drivers/dma/shdma.c +++ b/drivers/dma/shdma.c @@ -738,10 +738,9 @@ static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan) sh_chan_xfer_ld_queue(sh_chan); } -static enum dma_status sh_dmae_is_complete(struct dma_chan *chan, +static enum dma_status sh_dmae_tx_status(struct dma_chan *chan, dma_cookie_t cookie, - dma_cookie_t *done, - dma_cookie_t *used) + struct dma_tx_state *txstate) { struct sh_dmae_chan *sh_chan = to_sh_chan(chan); dma_cookie_t last_used; @@ -754,11 +753,11 @@ static enum dma_status sh_dmae_is_complete(struct dma_chan *chan, last_complete = sh_chan->completed_cookie; BUG_ON(last_complete < 0); - if (done) - *done = last_complete; - - if (used) - *used = last_used; + if (txstate) { + txstate->last = last_complete; + txstate->used = last_used; + txstate->residue = 0; + } spin_lock_bh(&sh_chan->desc_lock); @@ -1030,7 +1029,7 @@ static int __init sh_dmae_probe(struct platform_device *pdev) = sh_dmae_alloc_chan_resources; shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources; shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy; - shdev->common.device_is_tx_complete = sh_dmae_is_complete; + shdev->common.device_tx_status = sh_dmae_tx_status; shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending; /* Compulsory for DMA_SLAVE fields */ diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c index 7c06471ef863..8fc28814561a 100644 --- a/drivers/dma/timb_dma.c +++ b/drivers/dma/timb_dma.c @@ -511,8 +511,8 @@ static void td_free_chan_resources(struct dma_chan *chan) } } -static enum dma_status td_is_tx_complete(struct dma_chan *chan, - dma_cookie_t cookie, dma_cookie_t *done, dma_cookie_t *used) +static enum dma_status 
td_tx_status(struct dma_chan *chan, dma_cookie_t cookie, + struct dma_tx_state *txstate) { struct timb_dma_chan *td_chan = container_of(chan, struct timb_dma_chan, chan); @@ -527,10 +527,11 @@ static enum dma_status td_is_tx_complete(struct dma_chan *chan, ret = dma_async_is_complete(cookie, last_complete, last_used); - if (done) - *done = last_complete; - if (used) - *used = last_used; + if (txstate) { + txstate->last = last_complete; + txstate->used = last_used; + txstate->residue = 0; + } dev_dbg(chan2dev(chan), "%s: exit, ret: %d, last_complete: %d, last_used: %d\n", @@ -742,7 +743,7 @@ static int __devinit td_probe(struct platform_device *pdev) td->dma.device_alloc_chan_resources = td_alloc_chan_resources; td->dma.device_free_chan_resources = td_free_chan_resources; - td->dma.device_is_tx_complete = td_is_tx_complete; + td->dma.device_tx_status = td_tx_status; td->dma.device_issue_pending = td_issue_pending; dma_cap_set(DMA_SLAVE, td->dma.cap_mask); diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c index e528e15f44ab..a44e422cbc27 100644 --- a/drivers/dma/txx9dmac.c +++ b/drivers/dma/txx9dmac.c @@ -967,9 +967,8 @@ static int txx9dmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd) } static enum dma_status -txx9dmac_is_tx_complete(struct dma_chan *chan, - dma_cookie_t cookie, - dma_cookie_t *done, dma_cookie_t *used) +txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, + struct dma_tx_state *txstate) { struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); dma_cookie_t last_used; @@ -991,10 +990,11 @@ txx9dmac_is_tx_complete(struct dma_chan *chan, ret = dma_async_is_complete(cookie, last_complete, last_used); } - if (done) - *done = last_complete; - if (used) - *used = last_used; + if (txstate) { + txstate->last = last_complete; + txstate->used = last_used; + txstate->residue = 0; + } return ret; } @@ -1160,7 +1160,7 @@ static int __init txx9dmac_chan_probe(struct platform_device *pdev) dc->dma.device_alloc_chan_resources = txx9dmac_alloc_chan_resources; dc->dma.device_free_chan_resources = txx9dmac_free_chan_resources; dc->dma.device_control = txx9dmac_control; - dc->dma.device_is_tx_complete = txx9dmac_is_tx_complete; + dc->dma.device_tx_status = txx9dmac_tx_status; dc->dma.device_issue_pending = txx9dmac_issue_pending; if (pdata && pdata->memcpy_chan == ch) { dc->dma.device_prep_dma_memcpy = txx9dmac_prep_dma_memcpy; diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 0731802f876f..55b08e84ac8d 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -40,11 +40,13 @@ typedef s32 dma_cookie_t; * enum dma_status - DMA transaction status * @DMA_SUCCESS: transaction completed successfully * @DMA_IN_PROGRESS: transaction not yet processed + * @DMA_PAUSED: transaction is paused * @DMA_ERROR: transaction failed */ enum dma_status { DMA_SUCCESS, DMA_IN_PROGRESS, + DMA_PAUSED, DMA_ERROR, }; @@ -248,6 +250,21 @@ struct dma_async_tx_descriptor { spinlock_t lock; }; +/** + * struct dma_tx_state - filled in to report the status of + * a transfer. + * @last: last completed DMA cookie + * @used: last issued DMA cookie (i.e. 
the one in progress)
+ * @residue: the remaining number of bytes left to transmit
+ *	on the selected transfer for states DMA_IN_PROGRESS and
+ *	DMA_PAUSED if this is implemented in the driver, else 0
+ */
+struct dma_tx_state {
+	dma_cookie_t last;
+	dma_cookie_t used;
+	u32 residue;
+};
+
 /**
  * struct dma_device - info on the entity supplying DMA services
  * @chancnt: how many DMA channels are supported
@@ -276,7 +293,10 @@ struct dma_async_tx_descriptor {
  * @device_prep_slave_sg: prepares a slave dma operation
  * @device_control: manipulate all pending operations on a channel, returns
  *	zero or error code
- * @device_is_tx_complete: poll for transaction completion
+ * @device_tx_status: poll for transaction completion, the optional
+ *	txstate parameter can be supplied with a pointer to get a
+ *	struct with auxilary transfer status information, otherwise the call
+ *	will just return a simple status code
  * @device_issue_pending: push pending transactions to hardware
  */
 struct dma_device {
@@ -329,9 +349,9 @@ struct dma_device {
 					unsigned long flags);
 	int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd);
 
-	enum dma_status (*device_is_tx_complete)(struct dma_chan *chan,
-			dma_cookie_t cookie, dma_cookie_t *last,
-			dma_cookie_t *used);
+	enum dma_status (*device_tx_status)(struct dma_chan *chan,
+					    dma_cookie_t cookie,
+					    struct dma_tx_state *txstate);
 	void (*device_issue_pending)(struct dma_chan *chan);
 };
 
@@ -572,7 +592,15 @@ static inline void dma_async_issue_pending(struct dma_chan *chan)
 static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
 	dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
 {
-	return chan->device->device_is_tx_complete(chan, cookie, last, used);
+	struct dma_tx_state state;
+	enum dma_status status;
+
+	status = chan->device->device_tx_status(chan, cookie, &state);
+	if (last)
+		*last = state.last;
+	if (used)
+		*used = state.used;
+	return status;
 }
 
 #define dma_async_memcpy_complete(chan, cookie, last, used)\
-- cgit v1.2.3

From cc05ea0cd63437da2033b3ce6e033b1f1aaaf640 Mon Sep 17 00:00:00 2001
From: Jassi Brar
Date: Tue, 4 May 2010 18:22:15 +0900
Subject: DMA ENGINE: Do not reset 'private' of channel

The member 'private' of 'struct dma_chan' is meant for passing data
between the client and the controller driver. The DMA client driver may
point it to platform-specific data after acquiring the channel. So it is
the responsibility of the same code to reset it, if it must.

The DMA engine doesn't set it and hence shouldn't reset it either. This
resetting of 'private' by the DMA engine gets in the way of implementing
default channel settings during DMAC probe. That capability is useful so
that clients need not always provide platform-specific data, like Rx/Tx
FIFO addresses, which usually does not change across channel requests.
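[Illustration, not part of the patch: a sketch of the usage pattern this change protects. The filter function and the slave-data pointer are hypothetical; dma_cap_*(), dma_request_channel() and chan->private are the existing dmaengine API.]

#include <linux/dmaengine.h>

/* Hypothetical filter: accept whatever channel the mask narrowed down */
static bool example_filter(struct dma_chan *chan, void *filter_param)
{
        return true;
}

/*
 * Hypothetical client: acquire a slave channel and hand the controller
 * driver platform-specific data via chan->private.  Because the core no
 * longer clears ->private, defaults installed at DMAC probe time (or by
 * the client here) are not silently thrown away on release.
 */
static struct dma_chan *example_get_channel(void *slave_data)
{
        dma_cap_mask_t mask;
        struct dma_chan *chan;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        chan = dma_request_channel(mask, example_filter, NULL);
        if (!chan)
                return NULL;

        chan->private = slave_data;     /* owned by client and DMAC driver */
        return chan;
}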
Signed-off-by: Jassi Brar Signed-off-by: Dan Williams --- drivers/dma/dmaengine.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/dma/dmaengine.c') diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 790caeeb4ccd..21eb896f4dfd 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -514,7 +514,6 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v break; if (--device->privatecnt == 0) dma_cap_clear(DMA_PRIVATE, device->cap_mask); - chan->private = NULL; chan = NULL; } } @@ -536,7 +535,6 @@ void dma_release_channel(struct dma_chan *chan) /* drop PRIVATE cap enabled by __dma_request_channel() */ if (--chan->device->privatecnt == 0) dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask); - chan->private = NULL; mutex_unlock(&dma_list_mutex); } EXPORT_SYMBOL_GPL(dma_release_channel); -- cgit v1.2.3 From caa20d974c86af496b419eef70010e63b7fab7ac Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 17 May 2010 16:24:16 -0700 Subject: async_tx: trim dma_async_tx_descriptor in 'no channel switch' case Saves 24 bytes per descriptor (64-bit) when the channel-switching capabilities of async_tx are not required. Signed-off-by: Dan Williams --- crypto/async_tx/async_tx.c | 46 +++++++++++++++-------------------- drivers/dma/dmaengine.c | 16 +++++++------ include/linux/dmaengine.h | 60 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 88 insertions(+), 34 deletions(-) (limited to 'drivers/dma/dmaengine.c') diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c index f9cdf04fe7c0..7f2c00a45205 100644 --- a/crypto/async_tx/async_tx.c +++ b/crypto/async_tx/async_tx.c @@ -81,18 +81,13 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx, struct dma_device *device = chan->device; struct dma_async_tx_descriptor *intr_tx = (void *) ~0; - #ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH - BUG(); - #endif - /* first check to see if we can still append to depend_tx */ - spin_lock_bh(&depend_tx->lock); - if (depend_tx->parent && depend_tx->chan == tx->chan) { - tx->parent = depend_tx; - depend_tx->next = tx; + txd_lock(depend_tx); + if (txd_parent(depend_tx) && depend_tx->chan == tx->chan) { + txd_chain(depend_tx, tx); intr_tx = NULL; } - spin_unlock_bh(&depend_tx->lock); + txd_unlock(depend_tx); /* attached dependency, flush the parent channel */ if (!intr_tx) { @@ -111,24 +106,22 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx, if (intr_tx) { intr_tx->callback = NULL; intr_tx->callback_param = NULL; - tx->parent = intr_tx; - /* safe to set ->next outside the lock since we know we are + /* safe to chain outside the lock since we know we are * not submitted yet */ - intr_tx->next = tx; + txd_chain(intr_tx, tx); /* check if we need to append */ - spin_lock_bh(&depend_tx->lock); - if (depend_tx->parent) { - intr_tx->parent = depend_tx; - depend_tx->next = intr_tx; + txd_lock(depend_tx); + if (txd_parent(depend_tx)) { + txd_chain(depend_tx, intr_tx); async_tx_ack(intr_tx); intr_tx = NULL; } - spin_unlock_bh(&depend_tx->lock); + txd_unlock(depend_tx); if (intr_tx) { - intr_tx->parent = NULL; + txd_clear_parent(intr_tx); intr_tx->tx_submit(intr_tx); async_tx_ack(intr_tx); } @@ -176,21 +169,20 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx, * 2/ dependencies are 1:1 i.e. 
two transactions can * not depend on the same parent */ - BUG_ON(async_tx_test_ack(depend_tx) || depend_tx->next || - tx->parent); + BUG_ON(async_tx_test_ack(depend_tx) || txd_next(depend_tx) || + txd_parent(tx)); /* the lock prevents async_tx_run_dependencies from missing * the setting of ->next when ->parent != NULL */ - spin_lock_bh(&depend_tx->lock); - if (depend_tx->parent) { + txd_lock(depend_tx); + if (txd_parent(depend_tx)) { /* we have a parent so we can not submit directly * if we are staying on the same channel: append * else: channel switch */ if (depend_tx->chan == chan) { - tx->parent = depend_tx; - depend_tx->next = tx; + txd_chain(depend_tx, tx); s = ASYNC_TX_SUBMITTED; } else s = ASYNC_TX_CHANNEL_SWITCH; @@ -203,7 +195,7 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx, else s = ASYNC_TX_CHANNEL_SWITCH; } - spin_unlock_bh(&depend_tx->lock); + txd_unlock(depend_tx); switch (s) { case ASYNC_TX_SUBMITTED: @@ -212,12 +204,12 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx, async_tx_channel_switch(depend_tx, tx); break; case ASYNC_TX_DIRECT_SUBMIT: - tx->parent = NULL; + txd_clear_parent(tx); tx->tx_submit(tx); break; } } else { - tx->parent = NULL; + txd_clear_parent(tx); tx->tx_submit(tx); } diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index d18b5d069d7e..fcfe1a62ffa5 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -978,7 +978,9 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, struct dma_chan *chan) { tx->chan = chan; + #ifndef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH spin_lock_init(&tx->lock); + #endif } EXPORT_SYMBOL(dma_async_tx_descriptor_init); @@ -1011,7 +1013,7 @@ EXPORT_SYMBOL_GPL(dma_wait_for_async_tx); */ void dma_run_dependencies(struct dma_async_tx_descriptor *tx) { - struct dma_async_tx_descriptor *dep = tx->next; + struct dma_async_tx_descriptor *dep = txd_next(tx); struct dma_async_tx_descriptor *dep_next; struct dma_chan *chan; @@ -1019,7 +1021,7 @@ void dma_run_dependencies(struct dma_async_tx_descriptor *tx) return; /* we'll submit tx->next now, so clear the link */ - tx->next = NULL; + txd_clear_next(tx); chan = dep->chan; /* keep submitting up until a channel switch is detected @@ -1027,14 +1029,14 @@ void dma_run_dependencies(struct dma_async_tx_descriptor *tx) * processing the interrupt from async_tx_channel_switch */ for (; dep; dep = dep_next) { - spin_lock_bh(&dep->lock); - dep->parent = NULL; - dep_next = dep->next; + txd_lock(dep); + txd_clear_parent(dep); + dep_next = txd_next(dep); if (dep_next && dep_next->chan == chan) - dep->next = NULL; /* ->next will be submitted */ + txd_clear_next(dep); /* ->next will be submitted */ else dep_next = NULL; /* submit current dep and terminate */ - spin_unlock_bh(&dep->lock); + txd_unlock(dep); dep->tx_submit(dep); } diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 20ea12c86fd0..cb234979fc6b 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -230,11 +230,71 @@ struct dma_async_tx_descriptor { dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx); dma_async_tx_callback callback; void *callback_param; +#ifndef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH struct dma_async_tx_descriptor *next; struct dma_async_tx_descriptor *parent; spinlock_t lock; +#endif }; +#ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH +static inline void txd_lock(struct dma_async_tx_descriptor *txd) +{ +} +static inline void txd_unlock(struct dma_async_tx_descriptor *txd) +{ +} 
+static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next) +{ + BUG(); +} +static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd) +{ +} +static inline void txd_clear_next(struct dma_async_tx_descriptor *txd) +{ +} +static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd) +{ + return NULL; +} +static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd) +{ + return NULL; +} + +#else +static inline void txd_lock(struct dma_async_tx_descriptor *txd) +{ + spin_lock_bh(&txd->lock); +} +static inline void txd_unlock(struct dma_async_tx_descriptor *txd) +{ + spin_unlock_bh(&txd->lock); +} +static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next) +{ + txd->next = next; + next->parent = txd; +} +static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd) +{ + txd->parent = NULL; +} +static inline void txd_clear_next(struct dma_async_tx_descriptor *txd) +{ + txd->next = NULL; +} +static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd) +{ + return txd->parent; +} +static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd) +{ + return txd->next; +} +#endif + /** * struct dma_device - info on the entity supplying DMA services * @chancnt: how many DMA channels are supported -- cgit v1.2.3
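[Illustration, not part of the patch: dependency-handling code is now expected to go through the txd_*() accessors instead of touching ->next, ->parent and ->lock directly, so it keeps compiling when CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH compiles those fields out. The function below is a made-up sketch of the same check async_tx_channel_switch() performs above, written against the new helpers.]

#include <linux/dmaengine.h>

/* Hypothetical: try to append 'next' behind 'prev' when 'prev' has not
 * completed yet (it still has a parent) and both descriptors sit on the
 * same channel.  With channel switching disabled, txd_parent() is
 * hard-wired to NULL, so the branch is never taken and the stubbed
 * txd_chain() is never reached. */
static bool example_try_chain(struct dma_async_tx_descriptor *prev,
                              struct dma_async_tx_descriptor *next)
{
        bool chained = false;

        txd_lock(prev);
        if (txd_parent(prev) && prev->chan == next->chan) {
                txd_chain(prev, next);  /* sets prev->next and next->parent */
                chained = true;
        }
        txd_unlock(prev);

        return chained;
}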