[Sound-open-firmware] [PATCH v2] dw-dma: add support for interrupt per channel
On Apollolake, the interrupt numbers for different channels of the same controller are different. This patch adds support for that: register an interrupt handler for each channel, so the handler no longer needs to check which channel triggered it.
Signed-off-by: Keyon Jie yang.jie@linux.intel.com --- Update in v2: Fixed checkpatch.pl issues
Tested on GP-MRB, SOF Master: f7beb51118e6e8463a864b9416c773a508930e06, SOF Tool Master: 59d81995f682876bd34f939332e8838c76f714ec, https://github.com/plbossart/sound/tree/topic/sof-v4.14: 5a91e6776d41b0e97828882294cdc00b5c0bafd6
src/drivers/dw-dma.c | 247 +++++++++++++++++++++++++++++++++++++------------ src/include/reef/dma.h | 6 ++ 2 files changed, 195 insertions(+), 58 deletions(-)
diff --git a/src/drivers/dw-dma.c b/src/drivers/dw-dma.c index 5501f8e..7a1805e 100644 --- a/src/drivers/dw-dma.c +++ b/src/drivers/dw-dma.c @@ -809,6 +809,194 @@ static inline void dw_dma_chan_reload_next(struct dma *dma, int channel, dw_write(dma, DW_DMA_CHAN_EN, CHAN_ENABLE(channel)); }
+static void dw_dma_setup(struct dma *dma) +{ + struct dw_drv_plat_data *dp = dma->plat_data.drv_plat_data; + int i; + + /* we cannot config DMAC if DMAC has been already enabled by host */ + if (dw_read(dma, DW_DMA_CFG) != 0) + dw_write(dma, DW_DMA_CFG, 0x0); + + /* now check that it's 0 */ + for (i = DW_DMA_CFG_TRIES; i > 0; i--) { + if (dw_read(dma, DW_DMA_CFG) == 0) + goto found; + } + trace_dma_error("eDs"); + return; + +found: + for (i = 0; i < DW_MAX_CHAN; i++) + dw_read(dma, DW_DMA_CHAN_EN); + +#ifdef HAVE_HDDA + /* enable HDDA before DMAC */ + shim_write(SHIM_HMDC, SHIM_HMDC_HDDA_ALLCH); +#endif + + /* enable the DMA controller */ + dw_write(dma, DW_DMA_CFG, 1); + + /* mask all interrupts for all 8 channels */ + dw_write(dma, DW_MASK_TFR, INT_MASK_ALL); + dw_write(dma, DW_MASK_BLOCK, INT_MASK_ALL); + dw_write(dma, DW_MASK_SRC_TRAN, INT_MASK_ALL); + dw_write(dma, DW_MASK_DST_TRAN, INT_MASK_ALL); + dw_write(dma, DW_MASK_ERR, INT_MASK_ALL); + +#ifdef DW_FIFO_PARTITION + /* TODO: we cannot config DMA FIFOs if DMAC has been already */ + /* allocate FIFO partitions, 128 bytes for each ch */ + dw_write(dma, DW_FIFO_PART1_LO, 0x100080); + dw_write(dma, DW_FIFO_PART1_HI, 0x100080); + dw_write(dma, DW_FIFO_PART0_HI, 0x100080); + dw_write(dma, DW_FIFO_PART0_LO, 0x100080 | (1 << 26)); + dw_write(dma, DW_FIFO_PART0_LO, 0x100080); +#endif + + /* set channel priorities */ + for (i = 0; i < DW_MAX_CHAN; i++) { +#if defined CONFIG_BAYTRAIL || defined CONFIG_CHERRYTRAIL ||\ + defined CONFIG_APOLLOLAKE || defined CONFIG_CANNONLAKE + dw_write(dma, DW_CTRL_HIGH(i), + DW_CTLH_CLASS(dp->chan[i].class)); +#elif defined CONFIG_BROADWELL || defined CONFIG_HASWELL + dw_write(dma, DW_CFG_LOW(i), + DW_CFG_CLASS(dp->chan[i].class)); +#endif + } +} + +#ifdef CONFIG_APOLLOLAKE +/* external layer 2 interrupt for dmac */ +static void dw_dma_irq_handler(void *data) +{ + struct dma_int *dma_int = (struct dma_int *)data; + struct dma *dma = dma_int->dma; + struct dma_pdata *p = 
dma_get_drvdata(dma); + struct dma_sg_elem next; + uint32_t status_tfr = 0, status_block = 0, status_err = 0, status_intr; + uint32_t mask; + int i = dma_int->channel; + + status_intr = dw_read(dma, DW_INTR_STATUS); + if (!status_intr) + trace_dma_error("eDI"); + + trace_dma("irq"); + trace_value(status_intr); + + /* get the source of our IRQ. */ + status_block = dw_read(dma, DW_STATUS_BLOCK); + status_tfr = dw_read(dma, DW_STATUS_TFR); + + /* TODO: handle errors, just clear them atm */ + status_err = dw_read(dma, DW_STATUS_ERR); + if (status_err) { + trace_dma_error("eDi"); + dw_write(dma, DW_CLEAR_ERR, status_err & i); + } + + /* clear interrupts for channel*/ + dw_write(dma, DW_CLEAR_BLOCK, status_block); + dw_write(dma, DW_CLEAR_TFR, status_tfr); + + /* skip if channel is not running */ + if (p->chan[i].status != COMP_STATE_ACTIVE) { + trace_dma_error("eDs"); + return; + } + + mask = 0x1 << i; + +#if DW_USE_HW_LLI + /* end of a LLI block */ + if (status_block & mask && + p->chan[i].cb_type & DMA_IRQ_TYPE_BLOCK) { + next.src = DMA_RELOAD_LLI; + next.dest = DMA_RELOAD_LLI; + /* will reload lli by default */ + next.size = DMA_RELOAD_LLI; + p->chan[i].cb(p->chan[i].cb_data, + DMA_IRQ_TYPE_BLOCK, &next); + } +#endif + /* end of a transfer */ + if ((status_tfr & mask) && + (p->chan[i].cb_type & DMA_IRQ_TYPE_LLIST)) { + trace_value(status_tfr); + + next.src = DMA_RELOAD_LLI; + next.dest = DMA_RELOAD_LLI; + next.size = DMA_RELOAD_LLI; /* will reload lli by default */ + if (p->chan[i].cb) + p->chan[i].cb(p->chan[i].cb_data, + DMA_IRQ_TYPE_LLIST, &next); + + /* check for reload channel: + * next.size is DMA_RELOAD_END, stop this dma copy; + * next.size > 0 but not DMA_RELOAD_LLI, use next + * element for next copy; + * if we are waiting for pause, pause it; + * otherwise, reload lli + */ + switch (next.size) { + case DMA_RELOAD_END: + p->chan[i].status = COMP_STATE_PREPARE; + break; + case DMA_RELOAD_LLI: + /* reload lli, but let's check if it is paused */ + if 
(p->chan[i].status != COMP_STATE_PAUSED) + dw_dma_chan_reload_lli(dma, i); + break; + default: + dw_dma_chan_reload_next(dma, i, &next); + break; + } + } +} + +static int dw_dma_probe(struct dma *dma) +{ + struct dma_int *dma_int[DW_MAX_CHAN]; + struct dma_pdata *dw_pdata; + int i; + + /* allocate private data */ + dw_pdata = rzalloc(RZONE_SYS, RFLAGS_NONE, sizeof(*dw_pdata)); + dma_set_drvdata(dma, dw_pdata); + + spinlock_init(&dma->lock); + + dw_dma_setup(dma); + + /* init work */ + for (i = 0; i < dma->plat_data.channels; i++) { + dw_pdata->chan[i].dma = dma; + dw_pdata->chan[i].channel = i; + dw_pdata->chan[i].status = COMP_STATE_INIT; + + dma_int[i] = rzalloc(RZONE_SYS, RFLAGS_NONE, + sizeof(struct dma_int)); + + dma_int[i]->dma = dma; + dma_int[i]->channel = i; + dma_int[i]->irq = dma->plat_data.irq + + (i << REEF_IRQ_BIT_SHIFT); + + /* register our IRQ handler */ + interrupt_register(dma_int[i]->irq, + dw_dma_irq_handler, + dma_int[i]); + interrupt_enable(dma_int[i]->irq); + } + + return 0; +} + +#else + /* this will probably be called at the end of every period copied */ static void dw_dma_irq_handler(void *data) { @@ -909,64 +1097,6 @@ static void dw_dma_irq_handler(void *data) } }
-static void dw_dma_setup(struct dma *dma) -{ - struct dw_drv_plat_data *dp = dma->plat_data.drv_plat_data; - int i; - - /* we cannot config DMAC if DMAC has been already enabled by host */ - if (dw_read(dma, DW_DMA_CFG) != 0) - dw_write(dma, DW_DMA_CFG, 0x0); - - /* now check that it's 0 */ - for (i = DW_DMA_CFG_TRIES; i > 0; i--) { - if (dw_read(dma, DW_DMA_CFG) == 0) - goto found; - } - trace_dma_error("eDs"); - return; - -found: - for (i = 0; i < DW_MAX_CHAN; i++) - dw_read(dma, DW_DMA_CHAN_EN); - -#ifdef HAVE_HDDA - /* enable HDDA before DMAC */ - shim_write(SHIM_HMDC, SHIM_HMDC_HDDA_ALLCH); -#endif - - /* enable the DMA controller */ - dw_write(dma, DW_DMA_CFG, 1); - - /* mask all interrupts for all 8 channels */ - dw_write(dma, DW_MASK_TFR, INT_MASK_ALL); - dw_write(dma, DW_MASK_BLOCK, INT_MASK_ALL); - dw_write(dma, DW_MASK_SRC_TRAN, INT_MASK_ALL); - dw_write(dma, DW_MASK_DST_TRAN, INT_MASK_ALL); - dw_write(dma, DW_MASK_ERR, INT_MASK_ALL); - -#ifdef DW_FIFO_PARTITION - /* TODO: we cannot config DMA FIFOs if DMAC has been already */ - /* allocate FIFO partitions, 128 bytes for each ch */ - dw_write(dma, DW_FIFO_PART1_LO, 0x100080); - dw_write(dma, DW_FIFO_PART1_HI, 0x100080); - dw_write(dma, DW_FIFO_PART0_HI, 0x100080); - dw_write(dma, DW_FIFO_PART0_LO, 0x100080 | (1 << 26)); - dw_write(dma, DW_FIFO_PART0_LO, 0x100080); -#endif - - /* set channel priorities */ - for (i = 0; i < DW_MAX_CHAN; i++) { -#if defined CONFIG_BAYTRAIL || defined CONFIG_CHERRYTRAIL \ - || defined CONFIG_APOLLOLAKE || defined CONFIG_CANNONLAKE - dw_write(dma, DW_CTRL_HIGH(i), DW_CTLH_CLASS(dp->chan[i].class)); -#else - dw_write(dma, DW_CFG_LOW(i), DW_CFG_CLASS(dp->chan[i].class)); -#endif - } - -} - static int dw_dma_probe(struct dma *dma) { struct dma_pdata *dw_pdata; @@ -993,6 +1123,7 @@ static int dw_dma_probe(struct dma *dma)
return 0; } +#endif
const struct dma_ops dw_dma_ops = { .channel_get = dw_dma_channel_get, diff --git a/src/include/reef/dma.h b/src/include/reef/dma.h index e33adaa..77f8f71 100644 --- a/src/include/reef/dma.h +++ b/src/include/reef/dma.h @@ -128,6 +128,12 @@ struct dma { void *private; };
+struct dma_int { + struct dma *dma; + uint32_t channel; + uint32_t irq; +}; + struct dma *dma_get(int dmac_id);
#define dma_set_drvdata(dma, data) \
For hardware linked-list mode, we also need to configure the address and config registers for the first linked-list item; fix that here.
Signed-off-by: Keyon Jie yang.jie@linux.intel.com --- update on v2: Fix checkpatch.pl issues.
Tested on minnow turbot rt5651, SOF Master: f7beb51118e6e8463a864b9416c773a508930e06, SOF Tool Master: 59d81995f682876bd34f939332e8838c76f714ec, https://github.com/plbossart/sound/tree/topic/sof-v4.14: 5a91e6776d41b0e97828882294cdc00b5c0bafd6
src/audio/dai.c | 3 ++- src/drivers/dw-dma.c | 50 ++++++++++++++++++++++---------------------------- 2 files changed, 24 insertions(+), 29 deletions(-)
diff --git a/src/audio/dai.c b/src/audio/dai.c index d19e18a..3a6b9f7 100644 --- a/src/audio/dai.c +++ b/src/audio/dai.c @@ -226,7 +226,8 @@ static struct comp_dev *dai_new(struct sof_ipc_comp *comp) }
/* set up callback */ - dma_set_cb(dd->dma, dd->chan, DMA_IRQ_TYPE_LLIST, dai_dma_cb, dev); + dma_set_cb(dd->dma, dd->chan, DMA_IRQ_TYPE_BLOCK | + DMA_IRQ_TYPE_LLIST, dai_dma_cb, dev); dev->state = COMP_STATE_READY; return dev;
diff --git a/src/drivers/dw-dma.c b/src/drivers/dw-dma.c index 8248461..5501f8e 100644 --- a/src/drivers/dw-dma.c +++ b/src/drivers/dw-dma.c @@ -396,22 +396,11 @@ static int dw_dma_start(struct dma *dma, int channel)
#if DW_USE_HW_LLI /* TODO: Revisit: are we using LLP mode or single transfer ? */ - if (p->chan[channel].lli->llp) { - /* LLP mode - only write LLP pointer */ + if (p->chan[channel].lli) { + /* LLP mode - write LLP pointer */ dw_write(dma, DW_LLP(channel), (uint32_t)p->chan[channel].lli); - } else { - /* single transfer */ - dw_write(dma, DW_LLP(channel), 0); - - /* channel needs started from scratch, so write SARn, DARn */ - dw_write(dma, DW_SAR(channel), p->chan[channel].lli->sar); - dw_write(dma, DW_DAR(channel), p->chan[channel].lli->dar); - - /* program CTLn */ - dw_write(dma, DW_CTRL_LOW(channel), p->chan[channel].lli->ctrl_lo); - dw_write(dma, DW_CTRL_HIGH(channel), p->chan[channel].lli->ctrl_hi); } -#else +#endif /* channel needs started from scratch, so write SARn, DARn */ dw_write(dma, DW_SAR(channel), p->chan[channel].lli->sar); dw_write(dma, DW_DAR(channel), p->chan[channel].lli->dar); @@ -419,7 +408,6 @@ static int dw_dma_start(struct dma *dma, int channel) /* program CTLn */ dw_write(dma, DW_CTRL_LOW(channel), p->chan[channel].lli->ctrl_lo); dw_write(dma, DW_CTRL_HIGH(channel), p->chan[channel].lli->ctrl_hi); -#endif
/* write channel config */ dw_write(dma, DW_CFG_LOW(channel), p->chan[channel].cfg_lo); @@ -829,6 +817,7 @@ static void dw_dma_irq_handler(void *data) struct dma_sg_elem next; uint32_t status_tfr = 0; uint32_t status_block = 0; + uint32_t status_block_new = 0; uint32_t status_err = 0; uint32_t status_intr; uint32_t mask; @@ -861,10 +850,10 @@ static void dw_dma_irq_handler(void *data) platform_interrupt_clear(dma_irq(dma), pmask);
/* confirm IRQ cleared */ - status_block = dw_read(dma, DW_STATUS_BLOCK); - if (status_block) { + status_block_new = dw_read(dma, DW_STATUS_BLOCK); + if (status_block_new) { trace_dma_error("eI2"); - trace_value(status_block); + trace_value(status_block_new); }
for (i = 0; i < DW_MAX_CHAN; i++) { @@ -875,12 +864,25 @@ static void dw_dma_irq_handler(void *data)
mask = 0x1 << i;
+#if DW_USE_HW_LLI + /* end of a LLI block */ + if (status_block & mask && + p->chan[i].cb_type & DMA_IRQ_TYPE_BLOCK) { + next.src = DMA_RELOAD_LLI; + next.dest = DMA_RELOAD_LLI; + next.size = DMA_RELOAD_LLI; + p->chan[i].cb(p->chan[i].cb_data, + DMA_IRQ_TYPE_BLOCK, &next); + } +#endif /* end of a transfer */ if ((status_tfr & mask) && (p->chan[i].cb_type & DMA_IRQ_TYPE_LLIST)) {
- next.src = next.dest = DMA_RELOAD_LLI; - next.size = DMA_RELOAD_LLI; /* will reload lli by default */ + next.src = DMA_RELOAD_LLI; + next.dest = DMA_RELOAD_LLI; + /* will reload lli by default */ + next.size = DMA_RELOAD_LLI; if (p->chan[i].cb) p->chan[i].cb(p->chan[i].cb_data, DMA_IRQ_TYPE_LLIST, &next); @@ -904,14 +906,6 @@ static void dw_dma_irq_handler(void *data) break; } } -#if DW_USE_HW_LLI - /* end of a LLI block */ - if (status_block & mask && - p->chan[i].cb_type & DMA_IRQ_TYPE_BLOCK) { - p->chan[i].cb(p->chan[i].cb_data, - DMA_IRQ_TYPE_BLOCK); - } -#endif } }
On Mon, 2018-01-29 at 09:39 +0800, Keyon Jie wrote:
On Apollolake, the interrupt number for different channels of the same controller are different, here add implementation of it: register interrupt handler for each channel, and don't need check channel in its specific handler anymore.
Signed-off-by: Keyon Jie yang.jie@linux.intel.com
Both applied.
Liam
participants (2)
-
Keyon Jie
-
Liam Girdwood