On 2018-01-26 02:13, Pierre-Louis Bossart wrote:
CHECK: Alignment should match open parenthesis
#72: FILE: src/drivers/dw-dma.c:863:
+		dw_write(dma, DW_CTRL_HIGH(i),
+			DW_CTLH_CLASS(dp->chan[i].class));

CHECK: Alignment should match open parenthesis
#75: FILE: src/drivers/dw-dma.c:866:
+		dw_write(dma, DW_CFG_LOW(i),
+			DW_CFG_CLASS(dp->chan[i].class));

CHECK: Alignment should match open parenthesis
#125: FILE: src/drivers/dw-dma.c:916:
+	if (status_block & mask &&
+		p->chan[i].cb_type & DMA_IRQ_TYPE_BLOCK) {

CHECK: multiple assignments should be avoided
#126: FILE: src/drivers/dw-dma.c:917:
+		next.src = next.dest = DMA_RELOAD_LLI;

CHECK: Alignment should match open parenthesis
#135: FILE: src/drivers/dw-dma.c:926:
+	if ((status_tfr & mask) &&
+		(p->chan[i].cb_type & DMA_IRQ_TYPE_LLIST)) {

CHECK: multiple assignments should be avoided
#138: FILE: src/drivers/dw-dma.c:929:
+		next.src = next.dest = DMA_RELOAD_LLI;

CHECK: Alignment should match open parenthesis
#188: FILE: src/drivers/dw-dma.c:979:
+		dma_int[i] = rzalloc(RZONE_SYS, RFLAGS_NONE,
+			sizeof(struct dma_int));

CHECK: Alignment should match open parenthesis
#197: FILE: src/drivers/dw-dma.c:988:
+		interrupt_register(dma_int[i]->irq,
+			dw_dma_irq_handler, dma_int[i]);
OK, will fix it in v2.
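For the alignment CHECKs it is an indentation-only change, e.g. for the first two flagged calls the continuation argument moves under the open parenthesis, something like:

		dw_write(dma, DW_CTRL_HIGH(i),
			 DW_CTLH_CLASS(dp->chan[i].class));

		dw_write(dma, DW_CFG_LOW(i),
			 DW_CFG_CLASS(dp->chan[i].class));

and the two "multiple assignments should be avoided" CHECKs can be split into separate next.src and next.dest assignments.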
Thanks,
~Keyon
On 01/25/2018 03:46 AM, Keyon Jie wrote:
On Apollolake, the interrupt numbers for the different channels of the same DMA controller are different. Add support for this: register an interrupt handler for each channel, so the handler no longer needs to check which channel raised the interrupt.
Signed-off-by: Keyon Jie <yang.jie@linux.intel.com>
Tested on GP-MRB
SOF Master: f7beb51118e6e8463a864b9416c773a508930e06
SOF Tool Master: 59d81995f682876bd34f939332e8838c76f714ec
https://github.com/plbossart/sound/tree/topic/sof-v4.14: 5a91e6776d41b0e97828882294cdc00b5c0bafd6
 src/drivers/dw-dma.c   | 244 +++++++++++++++++++++++++++++++++++++------------
 src/include/reef/dma.h |   6 ++
 2 files changed, 192 insertions(+), 58 deletions(-)
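In short, on Apollolake every channel of a DMA controller has its own interrupt number, so the patch registers one handler context per channel instead of one shared handler that scans all channels. A condensed sketch of the registration side (names taken from the patch below, error handling omitted, so not the complete driver code):

	/* per-channel interrupt context, added to include/reef/dma.h */
	struct dma_int {
		struct dma *dma;	/* owning DMA controller */
		uint32_t channel;	/* channel served by this IRQ */
		uint32_t irq;		/* per-channel interrupt number */
	};

	/* in dw_dma_probe(): one registration per channel */
	for (i = 0; i < dma->plat_data.channels; i++) {
		dma_int[i] = rzalloc(RZONE_SYS, RFLAGS_NONE, sizeof(struct dma_int));
		dma_int[i]->dma = dma;
		dma_int[i]->channel = i;
		/* controller base IRQ plus a per-channel offset */
		dma_int[i]->irq = dma->plat_data.irq + (i << REEF_IRQ_BIT_SHIFT);

		interrupt_register(dma_int[i]->irq, dw_dma_irq_handler, dma_int[i]);
		interrupt_enable(dma_int[i]->irq);
	}

dw_dma_irq_handler() then takes its channel index straight from the dma_int context and only handles that channel's status bits, which is why the old check over all channels is no longer needed.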
diff --git a/src/drivers/dw-dma.c b/src/drivers/dw-dma.c
index 09e441c..a0782cb 100644
--- a/src/drivers/dw-dma.c
+++ b/src/drivers/dw-dma.c
@@ -809,6 +809,191 @@ static inline void dw_dma_chan_reload_next(struct dma *dma, int channel,
 	dw_write(dma, DW_DMA_CHAN_EN, CHAN_ENABLE(channel));
 }
 
+static void dw_dma_setup(struct dma *dma)
+{
+	struct dw_drv_plat_data *dp = dma->plat_data.drv_plat_data;
+	int i;
+
+	/* we cannot config DMAC if DMAC has been already enabled by host */
+	if (dw_read(dma, DW_DMA_CFG) != 0)
+		dw_write(dma, DW_DMA_CFG, 0x0);
+
+	/* now check that it's 0 */
+	for (i = DW_DMA_CFG_TRIES; i > 0; i--) {
+		if (dw_read(dma, DW_DMA_CFG) == 0)
+			goto found;
+	}
+
+	trace_dma_error("eDs");
+	return;
+
+found:
+	for (i = 0; i < DW_MAX_CHAN; i++)
+		dw_read(dma, DW_DMA_CHAN_EN);
+
+#ifdef HAVE_HDDA
+	/* enable HDDA before DMAC */
+	shim_write(SHIM_HMDC, SHIM_HMDC_HDDA_ALLCH);
+#endif
+
+	/* enable the DMA controller */
+	dw_write(dma, DW_DMA_CFG, 1);
+
+	/* mask all interrupts for all 8 channels */
+	dw_write(dma, DW_MASK_TFR, INT_MASK_ALL);
+	dw_write(dma, DW_MASK_BLOCK, INT_MASK_ALL);
+	dw_write(dma, DW_MASK_SRC_TRAN, INT_MASK_ALL);
+	dw_write(dma, DW_MASK_DST_TRAN, INT_MASK_ALL);
+	dw_write(dma, DW_MASK_ERR, INT_MASK_ALL);
+
+#ifdef DW_FIFO_PARTITION
+	/* TODO: we cannot config DMA FIFOs if DMAC has been already */
+	/* allocate FIFO partitions, 128 bytes for each ch */
+	dw_write(dma, DW_FIFO_PART1_LO, 0x100080);
+	dw_write(dma, DW_FIFO_PART1_HI, 0x100080);
+	dw_write(dma, DW_FIFO_PART0_HI, 0x100080);
+	dw_write(dma, DW_FIFO_PART0_LO, 0x100080 | (1 << 26));
+	dw_write(dma, DW_FIFO_PART0_LO, 0x100080);
+#endif
+
+	/* set channel priorities */
+	for (i = 0; i < DW_MAX_CHAN; i++) {
+#if defined CONFIG_BAYTRAIL || defined CONFIG_CHERRYTRAIL ||\
+	defined CONFIG_APOLLOLAKE || defined CONFIG_CANNONLAKE
+		dw_write(dma, DW_CTRL_HIGH(i),
+			DW_CTLH_CLASS(dp->chan[i].class));
+#elif defined CONFIG_BROADWELL || defined CONFIG_HASWELL
+		dw_write(dma, DW_CFG_LOW(i),
+			DW_CFG_CLASS(dp->chan[i].class));
+#endif
+	}
+}
+
+#ifdef CONFIG_APOLLOLAKE
+/* external layer 2 interrupt for dmac */
+static void dw_dma_irq_handler(void *data)
+{
+	struct dma_int *dma_int = (struct dma_int *)data;
+	struct dma *dma = dma_int->dma;
+	struct dma_pdata *p = dma_get_drvdata(dma);
+	struct dma_sg_elem next;
+	uint32_t status_tfr = 0, status_block = 0, status_err = 0,
+		status_intr;
+	uint32_t mask;
+	int i = dma_int->channel;
+
+	status_intr = dw_read(dma, DW_INTR_STATUS);
+	if (!status_intr)
+		trace_dma_error("eDI");
+
+	trace_dma("irq");
+	trace_value(status_intr);
+
+	/* get the source of our IRQ. */
+	status_block = dw_read(dma, DW_STATUS_BLOCK);
+	status_tfr = dw_read(dma, DW_STATUS_TFR);
+
+	/* TODO: handle errors, just clear them atm */
+	status_err = dw_read(dma, DW_STATUS_ERR);
+	if (status_err) {
+		trace_dma_error("eDi");
+		dw_write(dma, DW_CLEAR_ERR, status_err & i);
+	}
+
+	/* clear interrupts for channel*/
+	dw_write(dma, DW_CLEAR_BLOCK, status_block);
+	dw_write(dma, DW_CLEAR_TFR, status_tfr);
+
+	/* skip if channel is not running */
+	if (p->chan[i].status != COMP_STATE_ACTIVE) {
+		trace_dma_error("eDs");
+		return;
+	}
+
+	mask = 0x1 << i;
+
+#if DW_USE_HW_LLI
+	/* end of a LLI block */
+	if (status_block & mask &&
+		p->chan[i].cb_type & DMA_IRQ_TYPE_BLOCK) {
+		next.src = next.dest = DMA_RELOAD_LLI;
+		/* will reload lli by default */
+		next.size = DMA_RELOAD_LLI;
+		p->chan[i].cb(p->chan[i].cb_data,
+			DMA_IRQ_TYPE_BLOCK, &next);
+	}
+#endif
+	/* end of a transfer */
+	if ((status_tfr & mask) &&
+		(p->chan[i].cb_type & DMA_IRQ_TYPE_LLIST)) {
+		trace_value(status_tfr);
+
+		next.src = next.dest = DMA_RELOAD_LLI;
+		next.size = DMA_RELOAD_LLI; /* will reload lli by default */
+		if (p->chan[i].cb)
+			p->chan[i].cb(p->chan[i].cb_data,
+				DMA_IRQ_TYPE_LLIST, &next);
+
+		/* check for reload channel:
+		 * next.size is DMA_RELOAD_END, stop this dma copy;
+		 * next.size > 0 but not DMA_RELOAD_LLI, use next
+		 * element for next copy;
+		 * if we are waiting for pause, pause it;
+		 * otherwise, reload lli
+		 */
+		switch (next.size) {
+		case DMA_RELOAD_END:
+			p->chan[i].status = COMP_STATE_PREPARE;
+			break;
+		case DMA_RELOAD_LLI:
+			/* reload lli, but let's check if it is paused */
+			if (p->chan[i].status != COMP_STATE_PAUSED)
+				dw_dma_chan_reload_lli(dma, i);
+			break;
+		default:
+			dw_dma_chan_reload_next(dma, i, &next);
+			break;
+		}
+	}
+}
+
+static int dw_dma_probe(struct dma *dma)
+{
+	struct dma_int *dma_int[DW_MAX_CHAN];
+	struct dma_pdata *dw_pdata;
+	int i;
+
+	/* allocate private data */
+	dw_pdata = rzalloc(RZONE_SYS, RFLAGS_NONE, sizeof(*dw_pdata));
+	dma_set_drvdata(dma, dw_pdata);
+
+	spinlock_init(&dma->lock);
+
+	dw_dma_setup(dma);
+
+	/* init work */
+	for (i = 0; i < dma->plat_data.channels; i++) {
+		dw_pdata->chan[i].dma = dma;
+		dw_pdata->chan[i].channel = i;
+		dw_pdata->chan[i].status = COMP_STATE_INIT;
+
+		dma_int[i] = rzalloc(RZONE_SYS, RFLAGS_NONE,
+			sizeof(struct dma_int));
+
+		dma_int[i]->dma = dma;
+		dma_int[i]->channel = i;
+		dma_int[i]->irq = dma->plat_data.irq +
+			(i << REEF_IRQ_BIT_SHIFT);
+
+		/* register our IRQ handler */
+		interrupt_register(dma_int[i]->irq,
+			dw_dma_irq_handler, dma_int[i]);
+		interrupt_enable(dma_int[i]->irq);
+	}
+
+	return 0;
+}
+
+#else
 /* this will probably be called at the end of every period copied */
 static void dw_dma_irq_handler(void *data)
 {
@@ -906,64 +1091,6 @@ static void dw_dma_irq_handler(void *data)
 	}
 }
 
-static void dw_dma_setup(struct dma *dma)
-{
-	struct dw_drv_plat_data *dp = dma->plat_data.drv_plat_data;
-	int i;
-
-	/* we cannot config DMAC if DMAC has been already enabled by host */
-	if (dw_read(dma, DW_DMA_CFG) != 0)
-		dw_write(dma, DW_DMA_CFG, 0x0);
-
-	/* now check that it's 0 */
-	for (i = DW_DMA_CFG_TRIES; i > 0; i--) {
-		if (dw_read(dma, DW_DMA_CFG) == 0)
-			goto found;
-	}
-	trace_dma_error("eDs");
-	return;
-
-found:
-	for (i = 0; i < DW_MAX_CHAN; i++)
-		dw_read(dma, DW_DMA_CHAN_EN);
-
-#ifdef HAVE_HDDA
-	/* enable HDDA before DMAC */
-	shim_write(SHIM_HMDC, SHIM_HMDC_HDDA_ALLCH);
-#endif
-
-	/* enable the DMA controller */
-	dw_write(dma, DW_DMA_CFG, 1);
-
-	/* mask all interrupts for all 8 channels */
-	dw_write(dma, DW_MASK_TFR, INT_MASK_ALL);
-	dw_write(dma, DW_MASK_BLOCK, INT_MASK_ALL);
-	dw_write(dma, DW_MASK_SRC_TRAN, INT_MASK_ALL);
-	dw_write(dma, DW_MASK_DST_TRAN, INT_MASK_ALL);
-	dw_write(dma, DW_MASK_ERR, INT_MASK_ALL);
-
-#ifdef DW_FIFO_PARTITION
-	/* TODO: we cannot config DMA FIFOs if DMAC has been already */
-	/* allocate FIFO partitions, 128 bytes for each ch */
-	dw_write(dma, DW_FIFO_PART1_LO, 0x100080);
-	dw_write(dma, DW_FIFO_PART1_HI, 0x100080);
-	dw_write(dma, DW_FIFO_PART0_HI, 0x100080);
-	dw_write(dma, DW_FIFO_PART0_LO, 0x100080 | (1 << 26));
-	dw_write(dma, DW_FIFO_PART0_LO, 0x100080);
-#endif
-
-	/* set channel priorities */
-	for (i = 0; i < DW_MAX_CHAN; i++) {
-#if defined CONFIG_BAYTRAIL || defined CONFIG_CHERRYTRAIL \
-	|| defined CONFIG_APOLLOLAKE || defined CONFIG_CANNONLAKE
-		dw_write(dma, DW_CTRL_HIGH(i),
-			DW_CTLH_CLASS(dp->chan[i].class));
-#else
-		dw_write(dma, DW_CFG_LOW(i), DW_CFG_CLASS(dp->chan[i].class));
-#endif
-	}
-}
-
 static int dw_dma_probe(struct dma *dma)
 {
 	struct dma_pdata *dw_pdata;
@@ -990,6 +1117,7 @@ static int dw_dma_probe(struct dma *dma)
 
 	return 0;
 }
+#endif
 
 const struct dma_ops dw_dma_ops = {
 	.channel_get	= dw_dma_channel_get,
diff --git a/src/include/reef/dma.h b/src/include/reef/dma.h
index e33adaa..77f8f71 100644
--- a/src/include/reef/dma.h
+++ b/src/include/reef/dma.h
@@ -128,6 +128,12 @@ struct dma {
 	void *private;
 };
 
+struct dma_int {
+	struct dma *dma;
+	uint32_t channel;
+	uint32_t irq;
+};
+
 struct dma *dma_get(int dmac_id);
 
 #define dma_set_drvdata(dma, data) \