author	Eric Pilmore <epilmore@gigaio.com>	2023-01-18 19:39:08 -0800
committer	Vinod Koul <vkoul@kernel.org>	2023-01-19 18:59:34 +0530
commit	95e5fda3b5f9ed8239b145da3fa01e641cf5d53c (patch)
tree	afce5d3b1023c71cdefca5c03800da9d9adfac93 /drivers/dma
parent	a7a7ee6f5a019ad72852c001abbce50d35e992f2 (diff)
ptdma: pt_core_execute_cmd() should use spinlock
The interrupt handler (pt_core_irq_handler()) of the ptdma driver can be
called from interrupt context. The code flow in this function can lead down
to pt_core_execute_cmd(), which attempts to grab a mutex; taking a mutex is
not appropriate in interrupt context and ultimately leads to a kernel panic.
The fix here changes this mutex to a spinlock, which has been verified to
resolve the issue.

Fixes: fa5d823b16a9 ("dmaengine: ptdma: Initial driver for the AMD PTDMA")
Signed-off-by: Eric Pilmore <epilmore@gigaio.com>
Link: https://lore.kernel.org/r/20230119033907.35071-1-epilmore@gigaio.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
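For context, the pattern the patch applies is the standard one: code that can be
reached from hard-IRQ context must not take a sleeping lock such as a mutex, so
the shared queue state is protected by a spinlock taken with spin_lock_irqsave().
Below is a minimal, self-contained sketch of that pattern; it is not the ptdma
code itself, and the names (demo_queue, demo_submit, demo_irq_handler) are
hypothetical.

#include <linux/spinlock.h>
#include <linux/interrupt.h>

struct demo_queue {
	spinlock_t lock;	/* protects idx; may be taken from hard-IRQ context */
	unsigned int idx;
};

static void demo_submit(struct demo_queue *q)
{
	unsigned long flags;

	/*
	 * Disable local interrupts while the lock is held so the IRQ
	 * handler below cannot preempt us on this CPU and deadlock on
	 * the same lock.
	 */
	spin_lock_irqsave(&q->lock, flags);
	q->idx++;	/* stand-in for writing a descriptor to the HW queue */
	spin_unlock_irqrestore(&q->lock, flags);
}

static irqreturn_t demo_irq_handler(int irq, void *data)
{
	/* A mutex_lock() here would sleep in atomic context and can panic. */
	demo_submit(data);
	return IRQ_HANDLED;
}

spin_lock_irqsave() is used rather than plain spin_lock() so the submit path is
safe regardless of whether it is entered from process context with interrupts
enabled or from the interrupt handler itself.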
Diffstat (limited to 'drivers/dma')
-rw-r--r--	drivers/dma/ptdma/ptdma-dev.c	7
-rw-r--r--	drivers/dma/ptdma/ptdma.h	2
2 files changed, 5 insertions(+), 4 deletions(-)
diff --git a/drivers/dma/ptdma/ptdma-dev.c b/drivers/dma/ptdma/ptdma-dev.c
index 377da23012ac..a2bf13ff18b6 100644
--- a/drivers/dma/ptdma/ptdma-dev.c
+++ b/drivers/dma/ptdma/ptdma-dev.c
@@ -71,12 +71,13 @@ static int pt_core_execute_cmd(struct ptdma_desc *desc, struct pt_cmd_queue *cmd
 	bool soc = FIELD_GET(DWORD0_SOC, desc->dw0);
 	u8 *q_desc = (u8 *)&cmd_q->qbase[cmd_q->qidx];
 	u32 tail;
+	unsigned long flags;
 
 	if (soc) {
 		desc->dw0 |= FIELD_PREP(DWORD0_IOC, desc->dw0);
 		desc->dw0 &= ~DWORD0_SOC;
 	}
-	mutex_lock(&cmd_q->q_mutex);
+	spin_lock_irqsave(&cmd_q->q_lock, flags);
 
 	/* Copy 32-byte command descriptor to hw queue. */
 	memcpy(q_desc, desc, 32);
@@ -91,7 +92,7 @@ static int pt_core_execute_cmd(struct ptdma_desc *desc, struct pt_cmd_queue *cmd
 
 	/* Turn the queue back on using our cached control register */
 	pt_start_queue(cmd_q);
-	mutex_unlock(&cmd_q->q_mutex);
+	spin_unlock_irqrestore(&cmd_q->q_lock, flags);
 
 	return 0;
 }
@@ -199,7 +200,7 @@ int pt_core_init(struct pt_device *pt)
 
 	cmd_q->pt = pt;
 	cmd_q->dma_pool = dma_pool;
-	mutex_init(&cmd_q->q_mutex);
+	spin_lock_init(&cmd_q->q_lock);
 
 	/* Page alignment satisfies our needs for N <= 128 */
 	cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
diff --git a/drivers/dma/ptdma/ptdma.h b/drivers/dma/ptdma/ptdma.h
index d093c43b7d13..21b4bf895200 100644
--- a/drivers/dma/ptdma/ptdma.h
+++ b/drivers/dma/ptdma/ptdma.h
@@ -196,7 +196,7 @@ struct pt_cmd_queue {
 	struct ptdma_desc *qbase;
 
 	/* Aligned queue start address (per requirement) */
-	struct mutex q_mutex ____cacheline_aligned;
+	spinlock_t q_lock ____cacheline_aligned;
 	unsigned int qidx;
 
 	unsigned int qsize;