From 2eca73f707b676e07cc9e2d7e3ad5cb2d099a15b Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Mon, 10 Mar 2008 10:50:13 -0700
Subject: async_tx: checkpatch says s/__FUNCTION__/__func__/g

Signed-off-by: Dan Williams
---
 crypto/async_tx/async_memcpy.c |  6 +++---
 crypto/async_tx/async_memset.c |  6 +++---
 crypto/async_tx/async_tx.c     |  6 +++---
 crypto/async_tx/async_xor.c    | 12 ++++++------
 4 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index 0f6282207b32..84caa4efc0d4 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -66,11 +66,11 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 	}
 
 	if (tx) {
-		pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
+		pr_debug("%s: (async) len: %zu\n", __func__, len);
 		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
 	} else {
 		void *dest_buf, *src_buf;
-		pr_debug("%s: (sync) len: %zu\n", __FUNCTION__, len);
+		pr_debug("%s: (sync) len: %zu\n", __func__, len);
 
 		/* wait for any prerequisite operations */
 		if (depend_tx) {
@@ -80,7 +80,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 			BUG_ON(depend_tx->ack);
 			if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
 				panic("%s: DMA_ERROR waiting for depend_tx\n",
-					__FUNCTION__);
+					__func__);
 		}
 
 		dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset;
diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c
index 09c0e83664bc..f5ff3906b035 100644
--- a/crypto/async_tx/async_memset.c
+++ b/crypto/async_tx/async_memset.c
@@ -63,11 +63,11 @@ async_memset(struct page *dest, int val, unsigned int offset,
 	}
 
 	if (tx) {
-		pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
+		pr_debug("%s: (async) len: %zu\n", __func__, len);
 		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
 	} else { /* run the memset synchronously */
 		void *dest_buf;
-		pr_debug("%s: (sync) len: %zu\n", __FUNCTION__, len);
+		pr_debug("%s: (sync) len: %zu\n", __func__, len);
 
 		dest_buf = (void *) (((char *) page_address(dest)) + offset);
@@ -79,7 +79,7 @@ async_memset(struct page *dest, int val, unsigned int offset,
 			BUG_ON(depend_tx->ack);
 			if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
 				panic("%s: DMA_ERROR waiting for depend_tx\n",
-					__FUNCTION__);
+					__func__);
 		}
 
 		memset(dest_buf, val, len);
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 562882189de5..2be3bae89930 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -472,11 +472,11 @@ async_trigger_callback(enum async_tx_flags flags,
 		tx = NULL;
 
 	if (tx) {
-		pr_debug("%s: (async)\n", __FUNCTION__);
+		pr_debug("%s: (async)\n", __func__);
 
 		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
 	} else {
-		pr_debug("%s: (sync)\n", __FUNCTION__);
+		pr_debug("%s: (sync)\n", __func__);
 
 		/* wait for any prerequisite operations */
 		if (depend_tx) {
@@ -486,7 +486,7 @@ async_trigger_callback(enum async_tx_flags flags,
 			BUG_ON(depend_tx->ack);
 			if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
 				panic("%s: DMA_ERROR waiting for depend_tx\n",
-					__FUNCTION__);
+					__func__);
 		}
 
 		async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 2259a4ff15cb..7a9db353f198 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -47,7 +47,7 @@ do_async_xor(struct dma_device *device,
 	int i;
 	unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
 
-	pr_debug("%s: len: %zu\n", __FUNCTION__, len);
+	pr_debug("%s: len: %zu\n", __func__, len);
 
 	dma_dest = dma_map_page(device->dev, dest, offset, len,
 				DMA_FROM_DEVICE);
@@ -86,7 +86,7 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
 	void *_dest;
 	int i;
 
-	pr_debug("%s: len: %zu\n", __FUNCTION__, len);
+	pr_debug("%s: len: %zu\n", __func__, len);
 
 	/* reuse the 'src_list' array to convert to buffer pointers */
 	for (i = 0; i < src_cnt; i++)
@@ -196,7 +196,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
 					DMA_ERROR)
 				panic("%s: DMA_ERROR waiting for "
 					"depend_tx\n",
-					__FUNCTION__);
+					__func__);
 		}
 
 		do_sync_xor(dest, &src_list[src_off], offset,
@@ -276,7 +276,7 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
 		unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
 		int i;
 
-		pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
+		pr_debug("%s: (async) len: %zu\n", __func__, len);
 
 		for (i = 0; i < src_cnt; i++)
 			dma_src[i] = dma_map_page(device->dev, src_list[i],
@@ -299,7 +299,7 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
 	} else {
 		unsigned long xor_flags = flags;
 
-		pr_debug("%s: (sync) len: %zu\n", __FUNCTION__, len);
+		pr_debug("%s: (sync) len: %zu\n", __func__, len);
 
 		xor_flags |= ASYNC_TX_XOR_DROP_DST;
 		xor_flags &= ~ASYNC_TX_ACK;
@@ -310,7 +310,7 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
 		if (tx) {
 			if (dma_wait_for_async_tx(tx) == DMA_ERROR)
 				panic("%s: DMA_ERROR waiting for tx\n",
-					__FUNCTION__);
+					__func__);
 			async_tx_ack(tx);
 		}
-- 
cgit v1.2.3
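
The rename above is mechanical, but the underlying point is worth stating:
__func__ is the predefined identifier mandated by C99, while __FUNCTION__
is a gcc extension kept only for backwards compatibility, which is why
checkpatch flags it. A minimal standalone illustration (ordinary userspace
C, not taken from the kernel sources):

    #include <stdio.h>

    static void probe(void)
    {
            /* C99 behaves as if each function body began with:
             *   static const char __func__[] = "probe";
             * so this works with any conforming compiler, not just gcc.
             */
            printf("%s: called\n", __func__);
    }

    int main(void)
    {
            probe();
            return 0;
    }
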
From 35083252b5688254dfea7f20146a2e1018ffc5b4 Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Mon, 10 Mar 2008 17:50:13 -0700
Subject: async_tx: fix multiple dependency submission

Shrink struct dma_async_tx_descriptor and introduce
async_tx_channel_switch to properly inject a channel switch interrupt in
the descriptor stream. This simplifies the locking model as drivers no
longer need to handle dma_async_tx_descriptor.lock.

Signed-off-by: Dan Williams
Acked-by: Shannon Nelson
---
 crypto/async_tx/async_tx.c | 197 +++++++++++++++++++++++++++++++++++++--------
 drivers/dma/dmaengine.c    |   2 -
 drivers/dma/iop-adma.c     |   9 ++-
 include/linux/dmaengine.h  |   9 +--
 4 files changed, 170 insertions(+), 47 deletions(-)

diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 2be3bae89930..69756164b61d 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -89,13 +89,19 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
 		iter = tx;
 
 		/* find the root of the unsubmitted dependency chain */
-		while (iter->cookie == -EBUSY) {
+		do {
 			parent = iter->parent;
-			if (parent && parent->cookie == -EBUSY)
-				iter = iter->parent;
-			else
+			if (!parent)
 				break;
-		}
+			else
+				iter = parent;
+		} while (parent);
+
+		/* there is a small window for ->parent == NULL and
+		 * ->cookie == -EBUSY
+		 */
+		while (iter->cookie == -EBUSY)
+			cpu_relax();
 
 		status = dma_sync_wait(iter->chan, iter->cookie);
 	} while (status == DMA_IN_PROGRESS || (iter != tx));
@@ -111,24 +117,33 @@ EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
 void
 async_tx_run_dependencies(struct dma_async_tx_descriptor *tx)
 {
-	struct dma_async_tx_descriptor *dep_tx, *_dep_tx;
-	struct dma_device *dev;
+	struct dma_async_tx_descriptor *next = tx->next;
 	struct dma_chan *chan;
 
-	list_for_each_entry_safe(dep_tx, _dep_tx, &tx->depend_list,
-		depend_node) {
-		chan = dep_tx->chan;
-		dev = chan->device;
-		/* we can't depend on ourselves */
-		BUG_ON(chan == tx->chan);
-		list_del(&dep_tx->depend_node);
-		tx->tx_submit(dep_tx);
-
-		/* we need to poke the engine as client code does not
-		 * know about dependency submission events
-		 */
-		dev->device_issue_pending(chan);
+	if (!next)
+		return;
+
+	tx->next = NULL;
+	chan = next->chan;
+
+	/* keep submitting up until a channel switch is detected
+	 * in that case we will be called again as a result of
+	 * processing the interrupt from async_tx_channel_switch
+	 */
+	while (next && next->chan == chan) {
+		struct dma_async_tx_descriptor *_next;
+
+		spin_lock_bh(&next->lock);
+		next->parent = NULL;
+		_next = next->next;
+		next->next = NULL;
+		spin_unlock_bh(&next->lock);
+
+		next->tx_submit(next);
+		next = _next;
 	}
+
+	chan->device->device_issue_pending(chan);
 }
 EXPORT_SYMBOL_GPL(async_tx_run_dependencies);
@@ -397,6 +412,92 @@ static void __exit async_tx_exit(void)
 }
 #endif
 
+
+/**
+ * async_tx_channel_switch - queue an interrupt descriptor with a dependency
+ *	pre-attached.
+ * @depend_tx: the operation that must finish before the new operation runs
+ * @tx: the new operation
+ */
+static void
+async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
+			struct dma_async_tx_descriptor *tx)
+{
+	struct dma_chan *chan;
+	struct dma_device *device;
+	struct dma_async_tx_descriptor *intr_tx = (void *) ~0;
+
+	/* first check to see if we can still append to depend_tx */
+	spin_lock_bh(&depend_tx->lock);
+	if (depend_tx->parent && depend_tx->chan == tx->chan) {
+		tx->parent = depend_tx;
+		depend_tx->next = tx;
+		intr_tx = NULL;
+	}
+	spin_unlock_bh(&depend_tx->lock);
+
+	if (!intr_tx)
+		return;
+
+	chan = depend_tx->chan;
+	device = chan->device;
+
+	/* see if we can schedule an interrupt
+	 * otherwise poll for completion
+	 */
+	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask))
+		intr_tx = device->device_prep_dma_interrupt(chan);
+	else
+		intr_tx = NULL;
+
+	if (intr_tx) {
+		intr_tx->callback = NULL;
+		intr_tx->callback_param = NULL;
+		tx->parent = intr_tx;
+		/* safe to set ->next outside the lock since we know we are
+		 * not submitted yet
+		 */
+		intr_tx->next = tx;
+
+		/* check if we need to append */
+		spin_lock_bh(&depend_tx->lock);
+		if (depend_tx->parent) {
+			intr_tx->parent = depend_tx;
+			depend_tx->next = intr_tx;
+			async_tx_ack(intr_tx);
+			intr_tx = NULL;
+		}
+		spin_unlock_bh(&depend_tx->lock);
+
+		if (intr_tx) {
+			intr_tx->parent = NULL;
+			intr_tx->tx_submit(intr_tx);
+			async_tx_ack(intr_tx);
+		}
+	} else {
+		if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
+			panic("%s: DMA_ERROR waiting for depend_tx\n",
+			      __func__);
+		tx->tx_submit(tx);
+	}
+}
+
+
+/**
+ * submit_disposition - while holding depend_tx->lock we must avoid submitting
+ *	new operations to prevent a circular locking dependency with
+ *	drivers that already hold a channel lock when calling
+ *	async_tx_run_dependencies.
+ * @ASYNC_TX_SUBMITTED: we were able to append the new operation under the lock
+ * @ASYNC_TX_CHANNEL_SWITCH: when the lock is dropped schedule a channel switch
+ * @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly
+ */
+enum submit_disposition {
+	ASYNC_TX_SUBMITTED,
+	ASYNC_TX_CHANNEL_SWITCH,
+	ASYNC_TX_DIRECT_SUBMIT,
+};
+
 void
 async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
 	enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx,
@@ -405,28 +506,54 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
 	tx->callback = cb_fn;
 	tx->callback_param = cb_param;
 
-	/* set this new tx to run after depend_tx if:
-	 * 1/ a dependency exists (depend_tx is !NULL)
-	 * 2/ the tx can not be submitted to the current channel
-	 */
-	if (depend_tx && depend_tx->chan != chan) {
-		/* if ack is already set then we cannot be sure
+	if (depend_tx) {
+		enum submit_disposition s;
+
+		/* sanity check the dependency chain:
+		 * 1/ if ack is already set then we cannot be sure
 		 * we are referring to the correct operation
+		 * 2/ dependencies are 1:1 i.e. two transactions can
+		 * not depend on the same parent
 		 */
-		BUG_ON(depend_tx->ack);
+		BUG_ON(depend_tx->ack || depend_tx->next || tx->parent);
 
-		tx->parent = depend_tx;
+		/* the lock prevents async_tx_run_dependencies from missing
+		 * the setting of ->next when ->parent != NULL
+		 */
 		spin_lock_bh(&depend_tx->lock);
-		list_add_tail(&tx->depend_node, &depend_tx->depend_list);
-		if (depend_tx->cookie == 0) {
-			struct dma_chan *dep_chan = depend_tx->chan;
-			struct dma_device *dep_dev = dep_chan->device;
-			dep_dev->device_dependency_added(dep_chan);
+		if (depend_tx->parent) {
+			/* we have a parent so we can not submit directly
+			 * if we are staying on the same channel: append
+			 * else: channel switch
+			 */
+			if (depend_tx->chan == chan) {
+				tx->parent = depend_tx;
+				depend_tx->next = tx;
+				s = ASYNC_TX_SUBMITTED;
+			} else
+				s = ASYNC_TX_CHANNEL_SWITCH;
+		} else {
+			/* we do not have a parent so we may be able to submit
+			 * directly if we are staying on the same channel
+			 */
+			if (depend_tx->chan == chan)
+				s = ASYNC_TX_DIRECT_SUBMIT;
+			else
+				s = ASYNC_TX_CHANNEL_SWITCH;
 		}
 		spin_unlock_bh(&depend_tx->lock);
 
-		/* schedule an interrupt to trigger the channel switch */
-		async_trigger_callback(ASYNC_TX_ACK, depend_tx, NULL, NULL);
+		switch (s) {
+		case ASYNC_TX_SUBMITTED:
+			break;
+		case ASYNC_TX_CHANNEL_SWITCH:
+			async_tx_channel_switch(depend_tx, tx);
+			break;
+		case ASYNC_TX_DIRECT_SUBMIT:
+			tx->parent = NULL;
+			tx->tx_submit(tx);
+			break;
+		}
 	} else {
 		tx->parent = NULL;
 		tx->tx_submit(tx);
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 29965231b912..936978182121 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -600,8 +600,6 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
 {
 	tx->chan = chan;
 	spin_lock_init(&tx->lock);
-	INIT_LIST_HEAD(&tx->depend_node);
-	INIT_LIST_HEAD(&tx->depend_list);
 }
 EXPORT_SYMBOL(dma_async_tx_descriptor_init);
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 3986d54492bd..a6171dada30b 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -63,7 +63,6 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
 	struct iop_adma_chan *iop_chan, dma_cookie_t cookie)
 {
 	BUG_ON(desc->async_tx.cookie < 0);
-	spin_lock_bh(&desc->async_tx.lock);
 	if (desc->async_tx.cookie > 0) {
 		cookie = desc->async_tx.cookie;
 		desc->async_tx.cookie = 0;
@@ -101,7 +100,6 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
 
 	/* run dependent operations */
 	async_tx_run_dependencies(&desc->async_tx);
-	spin_unlock_bh(&desc->async_tx.lock);
 
 	return cookie;
 }
@@ -275,8 +273,11 @@ iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
 
 static void iop_adma_tasklet(unsigned long data)
 {
-	struct iop_adma_chan *chan = (struct iop_adma_chan *) data;
-	__iop_adma_slot_cleanup(chan);
+	struct iop_adma_chan *iop_chan = (struct iop_adma_chan *) data;
+
+	spin_lock(&iop_chan->lock);
+	__iop_adma_slot_cleanup(iop_chan);
+	spin_unlock(&iop_chan->lock);
 }
 
 static struct iop_adma_desc_slot *
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 261e43a4c873..f47bdbf4c09a 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -221,11 +221,9 @@ typedef void (*dma_async_tx_callback)(void *dma_async_param);
  * @callback: routine to call after this operation is complete
  * @callback_param: general parameter to pass to the callback routine
  * ---async_tx api specific fields---
- * @depend_list: at completion this list of transactions are submitted
- * @depend_node: allow this transaction to be executed after another
- *	transaction has completed, possibly on another channel
+ * @next: at completion submit this descriptor
  * @parent: pointer to the next level up in the dependency chain
- * @lock: protect the dependency list
+ * @lock: protect the parent and next pointers
  */
 struct dma_async_tx_descriptor {
 	dma_cookie_t cookie;
@@ -236,8 +234,7 @@ struct dma_async_tx_descriptor {
 	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
 	dma_async_tx_callback callback;
 	void *callback_param;
-	struct list_head depend_list;
-	struct list_head depend_node;
+	struct dma_async_tx_descriptor *next;
 	struct dma_async_tx_descriptor *parent;
 	spinlock_t lock;
 };
-- 
cgit v1.2.3

From 71bcf415903aefb57757b598a848730a534ff0e9 Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Mon, 10 Mar 2008 17:50:13 -0700
Subject: async_tx: kill ->device_dependency_added

DMA drivers no longer need to be notified of dependency submission
events as async_tx_run_dependencies and async_tx_channel_switch will
handle the scheduling and execution of dependent operations.

[sfr@canb.auug.org.au: extend this for fsldma]
Signed-off-by: Dan Williams
Acked-by: Shannon Nelson
---
 drivers/dma/dmaengine.c   |  1 -
 drivers/dma/fsldma.c      |  8 --------
 drivers/dma/ioat_dma.c    | 12 ------------
 drivers/dma/iop-adma.c    |  7 -------
 include/linux/dmaengine.h |  2 --
 5 files changed, 30 deletions(-)

diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 936978182121..5ca0d947462d 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -362,7 +362,6 @@ int dma_async_device_register(struct dma_device *device)
 
 	BUG_ON(!device->device_alloc_chan_resources);
 	BUG_ON(!device->device_free_chan_resources);
-	BUG_ON(!device->device_dependency_added);
 	BUG_ON(!device->device_is_tx_complete);
 	BUG_ON(!device->device_issue_pending);
 	BUG_ON(!device->dev);
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index cc9a68158d99..5f72c4a74f09 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -625,13 +625,6 @@ static void fsl_dma_memcpy_issue_pending(struct dma_chan *chan)
 	fsl_chan_xfer_ld_queue(fsl_chan);
 }
 
-static void fsl_dma_dependency_added(struct dma_chan *chan)
-{
-	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
-
-	fsl_chan_ld_cleanup(fsl_chan);
-}
-
 /**
  * fsl_dma_is_complete - Determine the DMA status
  * @fsl_chan : Freescale DMA channel
@@ -1020,7 +1013,6 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
 	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
 	fdev->common.device_is_tx_complete = fsl_dma_is_complete;
 	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
-	fdev->common.device_dependency_added = fsl_dma_dependency_added;
 	fdev->common.dev = &dev->dev;
 
 	irq = irq_of_parse_and_map(dev->node, 0);
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index 4017d9e7acd2..1517fe4e2d14 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -924,17 +924,6 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
 	spin_unlock_bh(&ioat_chan->cleanup_lock);
 }
 
-static void ioat_dma_dependency_added(struct dma_chan *chan)
-{
-	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
-	spin_lock_bh(&ioat_chan->desc_lock);
-	if (ioat_chan->pending == 0) {
-		spin_unlock_bh(&ioat_chan->desc_lock);
-		ioat_dma_memcpy_cleanup(ioat_chan);
-	} else
-		spin_unlock_bh(&ioat_chan->desc_lock);
-}
-
 /**
  * ioat_dma_is_complete - poll the status of a IOAT DMA transaction
  * @chan: IOAT DMA channel handle
@@ -1316,7 +1305,6 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
 	dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
 	device->common.device_is_tx_complete = ioat_dma_is_complete;
-	device->common.device_dependency_added = ioat_dma_dependency_added;
 	switch (device->version) {
 	case IOAT_VER_1_2:
 		device->common.device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index a6171dada30b..1cb42840e202 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -672,12 +672,6 @@ iop_adma_prep_dma_zero_sum(struct dma_chan *chan, dma_addr_t *dma_src,
 	return sw_desc ? &sw_desc->async_tx : NULL;
 }
 
-static void iop_adma_dependency_added(struct dma_chan *chan)
-{
-	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
-	tasklet_schedule(&iop_chan->irq_tasklet);
-}
-
 static void iop_adma_free_chan_resources(struct dma_chan *chan)
 {
 	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
@@ -1178,7 +1172,6 @@ static int __devinit iop_adma_probe(struct platform_device *pdev)
 	dma_dev->device_free_chan_resources = iop_adma_free_chan_resources;
 	dma_dev->device_is_tx_complete = iop_adma_is_complete;
 	dma_dev->device_issue_pending = iop_adma_issue_pending;
-	dma_dev->device_dependency_added = iop_adma_dependency_added;
 	dma_dev->dev = &pdev->dev;
 
 	/* set prep routines based on capability */
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index f47bdbf4c09a..4e413c76d440 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -258,7 +258,6 @@ struct dma_async_tx_descriptor {
  * @device_prep_dma_zero_sum: prepares a zero_sum operation
  * @device_prep_dma_memset: prepares a memset operation
  * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
- * @device_dependency_added: async_tx notifies the channel about new deps
  * @device_issue_pending: push pending transactions to hardware
  */
 struct dma_device {
@@ -293,7 +292,6 @@ struct dma_device {
 
 	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
 		struct dma_chan *chan);
-	void (*device_dependency_added)(struct dma_chan *chan);
 	enum dma_status (*device_is_tx_complete)(struct dma_chan *chan,
 			dma_cookie_t cookie, dma_cookie_t *last,
 			dma_cookie_t *used);
-- 
cgit v1.2.3
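
The two patches above replace the per-descriptor dependency list with a
1:1 chain: each descriptor has one ->parent (the operation it waits on)
and at most one ->next (the operation waiting on it), guarded by ->lock,
and completion walks the chain forward until it reaches a descriptor
destined for another channel. Below is a simplified userspace model of
that walk, mirroring the logic of async_tx_run_dependencies; the struct
and names are invented for illustration and the per-descriptor locking
is omitted:

    /* Toy model of the post-patch dependency chain. */
    struct tx_desc {
            struct tx_desc *parent; /* operation we wait on */
            struct tx_desc *next;   /* operation waiting on us */
            int chan_id;            /* stand-in for struct dma_chan * */
    };

    static void submit(struct tx_desc *tx) { /* hand off to hardware */ }

    static void run_dependencies(struct tx_desc *tx)
    {
            struct tx_desc *next = tx->next;
            int chan_id;

            if (!next)
                    return;

            tx->next = NULL;
            chan_id = next->chan_id;

            /* submit until a channel switch is detected; the real code
             * stops there and lets the interrupt descriptor queued by
             * async_tx_channel_switch restart the walk on the new channel
             */
            while (next && next->chan_id == chan_id) {
                    struct tx_desc *_next = next->next;

                    next->parent = NULL;
                    next->next = NULL;
                    submit(next);
                    next = _next;
            }
    }
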
From 3202c7323959701725fcb08a518aa2f4330e11f0 Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Mon, 10 Mar 2008 17:50:14 -0700
Subject: iop-adma: remove the workaround for missed interrupts on iop3xx

This workaround was covering the dependency submission bug in async_tx.

Signed-off-by: Dan Williams
---
 drivers/dma/iop-adma.c                 | 5 -----
 include/asm-arm/arch-iop13xx/adma.h    | 5 -----
 include/asm-arm/hardware/iop3xx-adma.h | 8 --------
 include/asm-arm/hardware/iop_adma.h    | 2 --
 4 files changed, 20 deletions(-)

diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 1cb42840e202..821bd1775796 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -255,8 +255,6 @@ static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
 
 	BUG_ON(!seen_current);
 
-	iop_chan_idle(busy, iop_chan);
-
 	if (cookie > 0) {
 		iop_chan->completed_cookie = cookie;
 		pr_debug("\tcompleted cookie %d\n", cookie);
@@ -1226,9 +1224,6 @@ static int __devinit iop_adma_probe(struct platform_device *pdev)
 	}
 
 	spin_lock_init(&iop_chan->lock);
-	init_timer(&iop_chan->cleanup_watchdog);
-	iop_chan->cleanup_watchdog.data = (unsigned long) iop_chan;
-	iop_chan->cleanup_watchdog.function = iop_adma_tasklet;
 	INIT_LIST_HEAD(&iop_chan->chain);
 	INIT_LIST_HEAD(&iop_chan->all_slots);
 	INIT_RCU_HEAD(&iop_chan->common.rcu);
diff --git a/include/asm-arm/arch-iop13xx/adma.h b/include/asm-arm/arch-iop13xx/adma.h
index efd9a5eb1008..90d14ee564f5 100644
--- a/include/asm-arm/arch-iop13xx/adma.h
+++ b/include/asm-arm/arch-iop13xx/adma.h
@@ -454,11 +454,6 @@ static inline void iop_chan_append(struct iop_adma_chan *chan)
 	__raw_writel(adma_accr, ADMA_ACCR(chan));
 }
 
-static inline void iop_chan_idle(int busy, struct iop_adma_chan *chan)
-{
-	do { } while (0);
-}
-
 static inline u32 iop_chan_get_status(struct iop_adma_chan *chan)
 {
 	return __raw_readl(ADMA_ACSR(chan));
diff --git a/include/asm-arm/hardware/iop3xx-adma.h b/include/asm-arm/hardware/iop3xx-adma.h
index 5c529e6a5e3b..84d635b0a71a 100644
--- a/include/asm-arm/hardware/iop3xx-adma.h
+++ b/include/asm-arm/hardware/iop3xx-adma.h
@@ -767,20 +767,12 @@ static inline int iop_desc_get_zero_result(struct iop_adma_desc_slot *desc)
 static inline void iop_chan_append(struct iop_adma_chan *chan)
 {
 	u32 dma_chan_ctrl;
-	/* workaround dropped interrupts on 3xx */
-	mod_timer(&chan->cleanup_watchdog, jiffies + msecs_to_jiffies(3));
 
 	dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
 	dma_chan_ctrl |= 0x2;
 	__raw_writel(dma_chan_ctrl, DMA_CCR(chan));
 }
 
-static inline void iop_chan_idle(int busy, struct iop_adma_chan *chan)
-{
-	if (!busy)
-		del_timer(&chan->cleanup_watchdog);
-}
-
 static inline u32 iop_chan_get_status(struct iop_adma_chan *chan)
 {
 	return __raw_readl(DMA_CSR(chan));
diff --git a/include/asm-arm/hardware/iop_adma.h b/include/asm-arm/hardware/iop_adma.h
index ca8e71f44346..cb7e3611bcba 100644
--- a/include/asm-arm/hardware/iop_adma.h
+++ b/include/asm-arm/hardware/iop_adma.h
@@ -51,7 +51,6 @@ struct iop_adma_device {
  * @common: common dmaengine channel object members
  * @last_used: place holder for allocation to continue from where it left off
  * @all_slots: complete domain of slots usable by the channel
- * @cleanup_watchdog: workaround missed interrupts on iop3xx
  * @slots_allocated: records the actual size of the descriptor slot pool
 * @irq_tasklet: bottom half where iop_adma_slot_cleanup runs
 */
@@ -65,7 +64,6 @@ struct iop_adma_chan {
 	struct dma_chan common;
 	struct iop_adma_desc_slot *last_used;
 	struct list_head all_slots;
-	struct timer_list cleanup_watchdog;
 	int slots_allocated;
 	struct tasklet_struct irq_tasklet;
 };
-- 
cgit v1.2.3
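
The watchdog removed above is an instance of a common defensive pattern:
arm a short timer every time work is appended, so that if the completion
interrupt is lost the timer re-runs the same cleanup path the interrupt
handler would have scheduled. A generic sketch of the pattern, using the
timer API of this era as the removed code does (names invented; not a
drop-in for any driver):

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    struct chan_state {
            struct timer_list cleanup_watchdog;
            /* ... channel bookkeeping ... */
    };

    static void chan_cleanup(unsigned long data)
    {
            /* same bottom half the missed interrupt would have run */
    }

    static void chan_init(struct chan_state *c)
    {
            init_timer(&c->cleanup_watchdog);
            c->cleanup_watchdog.data = (unsigned long) c;
            c->cleanup_watchdog.function = chan_cleanup;
    }

    /* re-arm whenever a descriptor is appended */
    static void chan_append(struct chan_state *c)
    {
            mod_timer(&c->cleanup_watchdog, jiffies + msecs_to_jiffies(3));
    }

    /* disarm once the channel is observed idle */
    static void chan_idle(struct chan_state *c)
    {
            del_timer(&c->cleanup_watchdog);
    }
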
From 7f1c6955958a5ecd0a05d342a63cd9864077bab3 Mon Sep 17 00:00:00 2001
From: Harvey Harrison
Date: Mon, 10 Mar 2008 17:50:14 -0700
Subject: iop-adma.c: replace remaining __FUNCTION__ occurrences

__FUNCTION__ is gcc-specific, use __func__

Signed-off-by: Harvey Harrison
Signed-off-by: Dan Williams
---
 drivers/dma/iop-adma.c | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)

diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 821bd1775796..93252294f32b 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -138,7 +138,7 @@ static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
 	int busy = iop_chan_is_busy(iop_chan);
 	int seen_current = 0, slot_cnt = 0, slots_per_op = 0;
 
-	dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);
+	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
 
 	/* free completed slots from the chain starting with
 	 * the oldest descriptor */
@@ -437,7 +437,7 @@ iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
 	spin_unlock_bh(&iop_chan->lock);
 
 	dev_dbg(iop_chan->device->common.dev, "%s cookie: %d slot: %d\n",
-		__FUNCTION__, sw_desc->async_tx.cookie, sw_desc->idx);
+		__func__, sw_desc->async_tx.cookie, sw_desc->idx);
 
 	return cookie;
 }
@@ -519,7 +519,7 @@ iop_adma_prep_dma_interrupt(struct dma_chan *chan)
 	struct iop_adma_desc_slot *sw_desc, *grp_start;
 	int slot_cnt, slots_per_op;
 
-	dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);
+	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
 
 	spin_lock_bh(&iop_chan->lock);
 	slot_cnt = iop_chan_interrupt_slot_count(&slots_per_op, iop_chan);
@@ -547,7 +547,7 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
 	BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));
 
 	dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
-		__FUNCTION__, len);
+		__func__, len);
 
 	spin_lock_bh(&iop_chan->lock);
 	slot_cnt = iop_chan_memcpy_slot_count(len, &slots_per_op);
@@ -579,7 +579,7 @@ iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest,
 	BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));
 
 	dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
-		__FUNCTION__, len);
+		__func__, len);
 
 	spin_lock_bh(&iop_chan->lock);
 	slot_cnt = iop_chan_memset_slot_count(len, &slots_per_op);
@@ -613,7 +613,7 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
 
 	dev_dbg(iop_chan->device->common.dev,
 		"%s src_cnt: %d len: %u flags: %lx\n",
-		__FUNCTION__, src_cnt, len, flags);
+		__func__, src_cnt, len, flags);
 
 	spin_lock_bh(&iop_chan->lock);
 	slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op);
@@ -647,7 +647,7 @@ iop_adma_prep_dma_zero_sum(struct dma_chan *chan, dma_addr_t *dma_src,
 		return NULL;
 
 	dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
-		__FUNCTION__, src_cnt, len);
+		__func__, src_cnt, len);
 
 	spin_lock_bh(&iop_chan->lock);
 	slot_cnt = iop_chan_zero_sum_slot_count(len, src_cnt, &slots_per_op);
@@ -658,7 +658,7 @@ iop_adma_prep_dma_zero_sum(struct dma_chan *chan, dma_addr_t *dma_src,
 		iop_desc_set_zero_sum_byte_count(grp_start, len);
 		grp_start->xor_check_result = result;
 		pr_debug("\t%s: grp_start->xor_check_result: %p\n",
-			__FUNCTION__, grp_start->xor_check_result);
+			__func__, grp_start->xor_check_result);
 		sw_desc->unmap_src_cnt = src_cnt;
 		sw_desc->unmap_len = len;
 		while (src_cnt--)
@@ -693,7 +693,7 @@ static void iop_adma_free_chan_resources(struct dma_chan *chan)
 	iop_chan->last_used = NULL;
 
 	dev_dbg(iop_chan->device->common.dev, "%s slots_allocated %d\n",
-		__FUNCTION__, iop_chan->slots_allocated);
+		__func__, iop_chan->slots_allocated);
 	spin_unlock_bh(&iop_chan->lock);
 
 	/* one is ok since we left it on there on purpose */
@@ -746,7 +746,7 @@ static irqreturn_t iop_adma_eot_handler(int irq, void *data)
 {
 	struct iop_adma_chan *chan = data;
 
-	dev_dbg(chan->device->common.dev, "%s\n", __FUNCTION__);
+	dev_dbg(chan->device->common.dev, "%s\n", __func__);
 
 	tasklet_schedule(&chan->irq_tasklet);
@@ -759,7 +759,7 @@ static irqreturn_t iop_adma_eoc_handler(int irq, void *data)
 {
 	struct iop_adma_chan *chan = data;
 
-	dev_dbg(chan->device->common.dev, "%s\n", __FUNCTION__);
+	dev_dbg(chan->device->common.dev, "%s\n", __func__);
 
 	tasklet_schedule(&chan->irq_tasklet);
@@ -816,7 +816,7 @@ static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device)
 	int err = 0;
 	struct iop_adma_chan *iop_chan;
 
-	dev_dbg(device->common.dev, "%s\n", __FUNCTION__);
+	dev_dbg(device->common.dev, "%s\n", __func__);
 
 	src = kzalloc(sizeof(u8) * IOP_ADMA_TEST_SIZE, GFP_KERNEL);
 	if (!src)
@@ -899,7 +899,7 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
 	int err = 0;
 	struct iop_adma_chan *iop_chan;
 
-	dev_dbg(device->common.dev, "%s\n", __FUNCTION__);
+	dev_dbg(device->common.dev, "%s\n", __func__);
 
 	for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
 		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
@@ -1152,7 +1152,7 @@ static int __devinit iop_adma_probe(struct platform_device *pdev)
 	}
 
 	dev_dbg(&pdev->dev, "%s: allocted descriptor pool virt %p phys %p\n",
-		__FUNCTION__, adev->dma_desc_pool_virt,
+		__func__, adev->dma_desc_pool_virt,
 		(void *) adev->dma_desc_pool);
 
 	adev->id = plat_data->hw_id;
@@ -1278,7 +1278,7 @@ static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan)
 	dma_cookie_t cookie;
 	int slot_cnt, slots_per_op;
 
-	dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);
+	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
 
 	spin_lock_bh(&iop_chan->lock);
 	slot_cnt = iop_chan_memcpy_slot_count(0, &slots_per_op);
@@ -1335,7 +1335,7 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
 	dma_cookie_t cookie;
 	int slot_cnt, slots_per_op;
 
-	dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);
+	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
 
 	spin_lock_bh(&iop_chan->lock);
 	slot_cnt = iop_chan_xor_slot_count(0, 2, &slots_per_op);
-- 
cgit v1.2.3

From 227a5acda4cd1caa799d74c6d16d88d34f3cd5b1 Mon Sep 17 00:00:00 2001
From: Zhang Wei
Date: Mon, 10 Mar 2008 18:34:05 -0700
Subject: dma: Fix fsldma.c warning messages when it's compiled under PPC64.

There are warning messages reported by Stephen Rothwell with
ARCH=powerpc allmodconfig build:

drivers/dma/fsldma.c: In function 'fsl_dma_prep_memcpy':
drivers/dma/fsldma.c:439: warning: comparison of distinct pointer types lacks a cast
drivers/dma/fsldma.c: In function 'fsl_chan_xfer_ld_queue':
drivers/dma/fsldma.c:584: warning: format '%016llx' expects type 'long long unsigned int', but argument 4 has type 'dma_addr_t'
drivers/dma/fsldma.c: In function 'fsl_dma_chan_do_interrupt':
drivers/dma/fsldma.c:668: warning: format '%x' expects type 'unsigned int', but argument 5 has type 'dma_addr_t'
drivers/dma/fsldma.c:684: warning: format '%016llx' expects type 'long long unsigned int', but argument 4 has type 'dma_addr_t'
drivers/dma/fsldma.c:684: warning: format '%016llx' expects type 'long long unsigned int', but argument 5 has type 'dma_addr_t'
drivers/dma/fsldma.c:701: warning: format '%02x' expects type 'unsigned int', but argument 4 has type 'dma_addr_t'
drivers/dma/fsldma.c: In function 'fsl_dma_self_test':
drivers/dma/fsldma.c:840: warning: format '%d' expects type 'int', but argument 5 has type 'size_t'
drivers/dma/fsldma.c: In function 'of_fsl_dma_probe':
drivers/dma/fsldma.c:1010: warning: format '%08x' expects type 'unsigned int', but argument 5 has type 'resource_size_t'

This patch fixed the above warning messages.

Signed-off-by: Zhang Wei
Signed-off-by: Dan Williams
---
 drivers/dma/fsldma.c | 29 ++++++++++++++++-------------
 1 file changed, 16 insertions(+), 13 deletions(-)

diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 5f72c4a74f09..9ee8f5c352b8 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -57,12 +57,12 @@ static void dma_init(struct fsl_dma_chan *fsl_chan)
 
 }
 
-static void set_sr(struct fsl_dma_chan *fsl_chan, dma_addr_t val)
+static void set_sr(struct fsl_dma_chan *fsl_chan, u32 val)
 {
 	DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32);
 }
 
-static dma_addr_t get_sr(struct fsl_dma_chan *fsl_chan)
+static u32 get_sr(struct fsl_dma_chan *fsl_chan)
 {
 	return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32);
 }
@@ -436,7 +436,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
 		dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
 #endif
 
-		copy = min(len, FSL_DMA_BCR_MAX_CNT);
+		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);
 
 		set_desc_cnt(fsl_chan, &new->hw, copy);
 		set_desc_src(fsl_chan, &new->hw, dma_src);
@@ -581,8 +581,8 @@ static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan)
 	if (ld_node != &fsl_chan->ld_queue) {
 		/* Get the ld start address from ld_queue */
 		next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys;
-		dev_dbg(fsl_chan->dev, "xfer LDs staring from 0x%016llx\n",
-			(u64)next_dest_addr);
+		dev_dbg(fsl_chan->dev, "xfer LDs staring from %p\n",
+			(void *)next_dest_addr);
 		set_cdar(fsl_chan, next_dest_addr);
 		dma_start(fsl_chan);
 	} else {
@@ -655,7 +655,7 @@ static enum dma_status fsl_dma_is_complete(struct dma_chan *chan,
 static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
 {
 	struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
-	dma_addr_t stat;
+	u32 stat;
 
 	stat = get_sr(fsl_chan);
 	dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n",
@@ -674,9 +674,8 @@ static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
 	 */
 	if (stat & FSL_DMA_SR_EOSI) {
 		dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n");
-		dev_dbg(fsl_chan->dev, "event: clndar 0x%016llx, "
-			"nlndar 0x%016llx\n", (u64)get_cdar(fsl_chan),
-			(u64)get_ndar(fsl_chan));
+		dev_dbg(fsl_chan->dev, "event: clndar %p, nlndar %p\n",
+			(void *)get_cdar(fsl_chan), (void *)get_ndar(fsl_chan));
 		stat &= ~FSL_DMA_SR_EOSI;
 	}
 
@@ -719,12 +718,15 @@ static void dma_do_tasklet(unsigned long data)
 	fsl_chan_ld_cleanup(fsl_chan);
 }
 
+#ifdef FSL_DMA_CALLBACKTEST
 static void fsl_dma_callback_test(struct fsl_dma_chan *fsl_chan)
 {
 	if (fsl_chan)
 		dev_info(fsl_chan->dev, "selftest: callback is ok!\n");
 }
+#endif
 
+#ifdef CONFIG_FSL_DMA_SELFTEST
 static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
 {
 	struct dma_chan *chan;
@@ -830,9 +832,9 @@ static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
 	if (err) {
 		for (i = 0; (*(src + i) == *(dest + i)) && (i < test_size);
 				i++);
-		dev_err(fsl_chan->dev, "selftest: Test failed, data %d/%d is "
+		dev_err(fsl_chan->dev, "selftest: Test failed, data %d/%ld is "
 				"error! src 0x%x, dest 0x%x\n",
-				i, test_size, *(src + i), *(dest + i));
+				i, (long)test_size, *(src + i), *(dest + i));
 	}
 
 free_resources:
@@ -841,6 +843,7 @@ out:
 	kfree(src);
 	return err;
 }
+#endif
 
 static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
 	const struct of_device_id *match)
@@ -1001,8 +1004,8 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
 	}
 
 	dev_info(&dev->dev, "Probe the Freescale DMA driver for %s "
-		"controller at 0x%08x...\n",
-		match->compatible, fdev->reg.start);
+		"controller at %p...\n",
+		match->compatible, (void *)fdev->reg.start);
 
 	fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end
 					- fdev->reg.start + 1);
-- 
cgit v1.2.3
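
The common thread in the fsldma warnings above is that dma_addr_t,
size_t, and resource_size_t change width between 32- and 64-bit
configurations, so no fixed printk format is correct for every build.
The patch sidesteps this by switching the status variables to a
fixed-width u32 and casting to void * or long for printing. Another
idiom that is always safe is widening explicitly; a one-line sketch
with an invented variable name:

    /* dma_addr_t may be 32 or 64 bits depending on the platform, so
     * widen explicitly instead of guessing a format width:
     */
    dev_dbg(dev, "descriptor at 0x%llx\n",
            (unsigned long long)desc_phys);

(Much later kernels also grew a dedicated %pad printk specifier that
prints a dma_addr_t passed by reference, avoiding the cast entirely.)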