[Sound-open-firmware] [PATCH 1/3] configure.ac: add CONFIG_DMA_TRACE flag for DMA trace feature
For debugging, e.g. when DMA trace doesn't work or is not implemented yet, we can pass --disable-dma-trace on the configure command line. This unsets the CONFIG_DMA_TRACE flag, and the firmware falls back to the traditional mailbox trace instead.
The flag is set by default if '--disable-dma-trace' is not passed to configure.
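For context, here is a minimal compilable sketch of the build-time split this flag creates; the backend split mirrors the trace.c changes in this patch, but trace_backend() and its stub bodies are illustrative, not firmware code:

#include <stdint.h>
#include <stdio.h>

/* Sketch of the CONFIG_DMA_TRACE build-time selection. Build with
 * -DCONFIG_DMA_TRACE to take the first path. */
#if defined(CONFIG_DMA_TRACE)
static void trace_backend(const uint64_t rec[2])
{
	/* real firmware: dtrace_event() queues the record and a DMA
	 * engine ships it to the host */
	printf("dma trace: %llx %llx\n",
	       (unsigned long long)rec[0], (unsigned long long)rec[1]);
}
#else
static void trace_backend(const uint64_t rec[2])
{
	/* real firmware: the record is written straight into the shared
	 * mailbox window */
	printf("mailbox trace: %llx %llx\n",
	       (unsigned long long)rec[0], (unsigned long long)rec[1]);
}
#endif

int main(void)
{
	uint64_t rec[2] = { 12345, 0xabcd };	/* timestamp, event */

	trace_backend(rec);
	return 0;
}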
Signed-off-by: Keyon Jie <yang.jie@linux.intel.com>
---
 configure.ac        |  8 ++++++++
 src/ipc/dma-copy.c  |  2 ++
 src/ipc/intel-ipc.c |  4 ++++
 src/lib/trace.c     | 58 ++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 72 insertions(+)
diff --git a/configure.ac b/configure.ac
index 8946c36..6d67220 100644
--- a/configure.ac
+++ b/configure.ac
@@ -122,6 +122,14 @@ case "$with_dsp_core" in
 esac
 
+# DMA trace support (optional), enabled by default
+AC_ARG_ENABLE([dma-trace],
+	AS_HELP_STRING([--disable-dma-trace], [Disable DMA trace and use fallback mailbox trace]))
+
+AS_IF([test "x$enable_dma_trace" != "xno"], [
+	AC_DEFINE([CONFIG_DMA_TRACE], [1], [Configure DMA trace])
+	])
+
 # Test after CFLAGS set otherwise test of cross compiler fails.
 AM_PROG_AS
 AM_PROG_AR
diff --git a/src/ipc/dma-copy.c b/src/ipc/dma-copy.c
index 8565966..57d3e3d 100644
--- a/src/ipc/dma-copy.c
+++ b/src/ipc/dma-copy.c
@@ -77,7 +77,9 @@ static void dma_complete(void *data, uint32_t type, struct dma_sg_elem *next)
 	if (type == DMA_IRQ_TYPE_LLIST)
 		wait_completed(comp);
 
+#if defined(CONFIG_DMA_TRACE)
 	ipc_dma_trace_send_position();
+#endif
 
 	next->size = DMA_RELOAD_END;
 }
diff --git a/src/ipc/intel-ipc.c b/src/ipc/intel-ipc.c
index e13b541..4c310b6 100644
--- a/src/ipc/intel-ipc.c
+++ b/src/ipc/intel-ipc.c
@@ -585,6 +585,7 @@ static int ipc_glb_pm_message(uint32_t header)
 	}
 }
 
+#if defined(CONFIG_DMA_TRACE)
 /*
  * Debug IPC Operations.
  */
@@ -662,6 +663,7 @@ static int ipc_glb_debug_message(uint32_t header)
 		return -EINVAL;
 	}
 }
+#endif
 
 /*
  * Topology IPC Operations.
@@ -877,8 +879,10 @@ int ipc_cmd(void)
 		return ipc_glb_stream_message(hdr->cmd);
 	case iGS(SOF_IPC_GLB_DAI_MSG):
 		return ipc_glb_dai_message(hdr->cmd);
+#if defined(CONFIG_DMA_TRACE)
 	case iGS(SOF_IPC_GLB_TRACE_MSG):
 		return ipc_glb_debug_message(hdr->cmd);
+#endif
 	default:
 		trace_ipc_error("eGc");
 		trace_value(type);
diff --git a/src/lib/trace.c b/src/lib/trace.c
index eec93b5..bb81d47 100644
--- a/src/lib/trace.c
+++ b/src/lib/trace.c
@@ -111,6 +111,8 @@ void _trace_error_atomic(uint32_t event)
 	dcache_writeback_region((void*)t, sizeof(uint64_t) * 2);
 }
 
+#if defined(CONFIG_DMA_TRACE)
+
 void _trace_event(uint32_t event)
 {
 	uint64_t dt[2];
@@ -135,6 +137,62 @@ void _trace_event_atomic(uint32_t event)
 	dtrace_event_atomic((const char*)dt, sizeof(uint64_t) * 2);
 }
 
+#else
+
+void _trace_event(uint32_t event)
+{
+	unsigned long flags;
+	uint64_t time, *t;
+
+	if (!trace.enable)
+		return;
+
+	time = platform_timer_get(platform_timer);
+
+	/* send event by mail box too. */
+	spin_lock_irq(&trace.lock, flags);
+
+	/* write timestamp and event to trace buffer */
+	t = (uint64_t *)(MAILBOX_TRACE_BASE + trace.pos);
+	trace.pos += (sizeof(uint64_t) << 1);
+
+	if (trace.pos > MAILBOX_TRACE_SIZE - sizeof(uint64_t) * 2)
+		trace.pos = 0;
+
+	spin_unlock_irq(&trace.lock, flags);
+
+	t[0] = time;
+	t[1] = event;
+
+	/* writeback trace data */
+	dcache_writeback_region((void *)t, sizeof(uint64_t) * 2);
+}
+
+void _trace_event_atomic(uint32_t event)
+{
+	uint64_t time, *t;
+
+	if (!trace.enable)
+		return;
+
+	time = platform_timer_get(platform_timer);
+
+	/* write timestamp and event to trace buffer */
+	t = (uint64_t *)(MAILBOX_TRACE_BASE + trace.pos);
+	trace.pos += (sizeof(uint64_t) << 1);
+
+	if (trace.pos > MAILBOX_TRACE_SIZE - sizeof(uint64_t) * 2)
+		trace.pos = 0;
+
+	t[0] = time;
+	t[1] = event;
+
+	/* writeback trace data */
+	dcache_writeback_region((void *)t, sizeof(uint64_t) * 2);
+}
+
+#endif
+
 void trace_off(void)
 {
 	trace.enable = 0;
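One note on the fallback path added above: each record is two uint64_t words (timestamp and event), so the write position advances 16 bytes at a time and wraps before it would overrun the mailbox window. A standalone sketch of that wrap arithmetic, assuming an illustrative 4 KB window in place of MAILBOX_TRACE_SIZE:

#include <stdint.h>
#include <stdio.h>

#define TRACE_SIZE 4096	/* stand-in for MAILBOX_TRACE_SIZE */

static uint32_t trace_pos;

/* reserve one 16-byte entry, wrapping like _trace_event() above */
static uint32_t trace_reserve(void)
{
	uint32_t cur = trace_pos;

	trace_pos += sizeof(uint64_t) << 1;	/* 16 bytes per entry */
	if (trace_pos > TRACE_SIZE - sizeof(uint64_t) * 2)
		trace_pos = 0;			/* wrap to buffer start */

	return cur;
}

int main(void)
{
	int i;

	/* 256 entries fit in 4096 bytes; entries 257 and 258 land at
	 * offsets 0 and 16 again, leaving the write position at 32 */
	for (i = 0; i < 258; i++)
		trace_reserve();

	printf("write offset after 258 entries: %u\n", trace_pos);
	return 0;
}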
On some platforms (those with CONFIG_IRQ_MAP configured), the DW DMAC interrupts are mapped to external layer-2 interrupt numbers, with a different number for each channel.
Handle this case here: 1. register an interrupt handler for each channel; 2. each handler only needs to take care of its own channel.
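Each channel's interrupt number is the DMAC's base IRQ with the channel index shifted into the layer-2 bits, as dw_dma_probe() computes below with REEF_IRQ_BIT_SHIFT. A minimal sketch of that encoding, with an assumed shift value:

#include <stdint.h>
#include <stdio.h>

#define IRQ_BIT_SHIFT 8	/* assumed; the firmware uses REEF_IRQ_BIT_SHIFT */

/* compose a layer-2 IRQ number: the low bits carry the parent
 * (level-1) IRQ, the upper bits select the DMAC channel */
static uint32_t chan_irq(uint32_t base_irq, uint32_t channel)
{
	return base_irq + (channel << IRQ_BIT_SHIFT);
}

int main(void)
{
	uint32_t ch;

	/* every channel of a DMAC on parent IRQ 6 gets a unique number,
	 * so each can have its own handler and private data */
	for (ch = 0; ch < 4; ch++)
		printf("channel %u -> irq 0x%x\n", ch, chan_irq(6, ch));

	return 0;
}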
Signed-off-by: Keyon Jie <yang.jie@linux.intel.com>
---
 src/drivers/dw-dma.c   | 238 ++++++++++++++++++++++++++++++++++-----------
 src/include/reef/dma.h |   6 ++
 2 files changed, 187 insertions(+), 57 deletions(-)
diff --git a/src/drivers/dw-dma.c b/src/drivers/dw-dma.c
index b288049..fe229e1 100644
--- a/src/drivers/dw-dma.c
+++ b/src/drivers/dw-dma.c
@@ -711,6 +711,186 @@ static inline void dw_dma_chan_reload_next(struct dma *dma, int channel,
 	dw_write(dma, DW_DMA_CHAN_EN, CHAN_ENABLE(channel));
 }
 
+static void dw_dma_setup(struct dma *dma)
+{
+	struct dw_drv_plat_data *dp = dma->plat_data.drv_plat_data;
+	int i;
+
+	/* we cannot config DMAC if DMAC has been already enabled by host */
+	if (dw_read(dma, DW_DMA_CFG) != 0)
+		dw_write(dma, DW_DMA_CFG, 0x0);
+
+	/* now check that it's 0 */
+	for (i = DW_DMA_CFG_TRIES; i > 0; i--) {
+		if (dw_read(dma, DW_DMA_CFG) == 0)
+			goto found;
+	}
+	trace_dma_error("eDs");
+	return;
+
+found:
+	for (i = 0; i < DW_MAX_CHAN; i++)
+		dw_read(dma, DW_DMA_CHAN_EN);
+
+#ifdef HAVE_HDDA
+	/* enable HDDA before DMAC */
+	shim_write(SHIM_HMDC, SHIM_HMDC_HDDA_ALLCH);
+#endif
+
+	/* enable the DMA controller */
+	dw_write(dma, DW_DMA_CFG, 1);
+
+	/* mask all interrupts for all 8 channels */
+	dw_write(dma, DW_MASK_TFR, INT_MASK_ALL);
+	dw_write(dma, DW_MASK_BLOCK, INT_MASK_ALL);
+	dw_write(dma, DW_MASK_SRC_TRAN, INT_MASK_ALL);
+	dw_write(dma, DW_MASK_DST_TRAN, INT_MASK_ALL);
+	dw_write(dma, DW_MASK_ERR, INT_MASK_ALL);
+
+#ifdef DW_FIFO_PARTITION
+	/* TODO: we cannot config DMA FIFOs if DMAC has been already */
+	/* allocate FIFO partitions, 128 bytes for each ch */
+	dw_write(dma, DW_FIFO_PART1_LO, 0x100080);
+	dw_write(dma, DW_FIFO_PART1_HI, 0x100080);
+	dw_write(dma, DW_FIFO_PART0_HI, 0x100080);
+	dw_write(dma, DW_FIFO_PART0_LO, 0x100080 | (1 << 26));
+	dw_write(dma, DW_FIFO_PART0_LO, 0x100080);
+#endif
+
+	/* set channel priorities */
+	for (i = 0; i < DW_MAX_CHAN; i++) {
+#if defined CONFIG_BAYTRAIL || defined CONFIG_CHERRYTRAIL
+		dw_write(dma, DW_CTRL_HIGH(i),
+			DW_CTLH_CLASS(dp->chan[i].class));
+#else
+		dw_write(dma, DW_CFG_LOW(i), DW_CFG_CLASS(dp->chan[i].class));
+#endif
+	}
+
+}
+
+#ifdef CONFIG_IRQ_MAP
+/* external layer 2 interrupt for dmac */
+static void dw_dma_irq_handler(void *data)
+{
+	struct dma_int *dma_int = (struct dma_int *)data;
+	struct dma *dma = dma_int->dma;
+	struct dma_pdata *p = dma_get_drvdata(dma);
+	struct dma_sg_elem next;
+	uint32_t status_tfr = 0, status_block = 0, status_err = 0, status_intr;
+	uint32_t mask;
+	int i = dma_int->channel;
+
+	status_intr = dw_read(dma, DW_INTR_STATUS);
+	if (!status_intr)
+		trace_dma_error("eDI");
+
+	trace_dma("irq");
+
+	/* get the source of our IRQ. */
+	status_block = dw_read(dma, DW_STATUS_BLOCK);
+	status_tfr = dw_read(dma, DW_STATUS_TFR);
+
+	/* TODO: handle errors, just clear them atm */
+	status_err = dw_read(dma, DW_STATUS_ERR);
+	if (status_err) {
+		trace_dma_error("eDi");
+		dw_write(dma, DW_CLEAR_ERR, status_err & i);
+	}
+
+	/* clear interrupts for channel */
+	dw_write(dma, DW_CLEAR_BLOCK, status_block);
+	dw_write(dma, DW_CLEAR_TFR, status_tfr);
+
+	/* skip if channel is not running */
+	if (p->chan[i].status != COMP_STATE_ACTIVE) {
+		trace_dma_error("eDs");
+		return;
+	}
+
+	mask = 0x1 << i;
+
+	/* end of a transfer */
+	if ((status_tfr & mask) &&
+	    (p->chan[i].cb_type & DMA_IRQ_TYPE_LLIST)) {
+		trace_value(status_tfr);
+
+		next.src = next.dest = DMA_RELOAD_LLI;
+		next.size = DMA_RELOAD_LLI; /* will reload lli by default */
+		if (p->chan[i].cb)
+			p->chan[i].cb(p->chan[i].cb_data,
+				DMA_IRQ_TYPE_LLIST, &next);
+
+		/* check for reload channel:
+		 * next.size is DMA_RELOAD_END, stop this dma copy;
+		 * next.size > 0 but not DMA_RELOAD_LLI, use next
+		 * element for next copy;
+		 * if we are waiting for pause, pause it;
+		 * otherwise, reload lli
+		 */
+		switch (next.size) {
+		case DMA_RELOAD_END:
+			p->chan[i].status = COMP_STATE_PREPARE;
+			break;
+		case DMA_RELOAD_LLI:
+			/* reload lli, but let's check if it is paused */
+			if (p->chan[i].status != COMP_STATE_PAUSED)
+				dw_dma_chan_reload_lli(dma, i);
+			break;
+		default:
+			dw_dma_chan_reload_next(dma, i, &next);
+			break;
+		}
+	}
+#if DW_USE_HW_LLI
+	/* end of a LLI block */
+	if (status_block & mask &&
+	    p->chan[i].cb_type & DMA_IRQ_TYPE_BLOCK) {
+		p->chan[i].cb(p->chan[i].cb_data,
+			DMA_IRQ_TYPE_BLOCK);
+	}
+#endif
+}
+
+static int dw_dma_probe(struct dma *dma)
+{
+	struct dma_int *dma_int[DW_MAX_CHAN];
+	struct dma_pdata *dw_pdata;
+	int i;
+
+	/* allocate private data */
+	dw_pdata = rzalloc(RZONE_SYS, RFLAGS_NONE, sizeof(*dw_pdata));
+	dma_set_drvdata(dma, dw_pdata);
+
+	spinlock_init(&dma->lock);
+
+	dw_dma_setup(dma);
+
+	/* init work */
+	for (i = 0; i < dma->plat_data.channels; i++) {
+		dw_pdata->chan[i].dma = dma;
+		dw_pdata->chan[i].channel = i;
+		dw_pdata->chan[i].status = COMP_STATE_INIT;
+
+		dma_int[i] = rzalloc(RZONE_SYS, RFLAGS_NONE,
+			sizeof(struct dma_int));
+
+		dma_int[i]->dma = dma;
+		dma_int[i]->channel = i;
+		dma_int[i]->irq = dma->plat_data.irq +
+			(i << REEF_IRQ_BIT_SHIFT);
+
+		/* register our IRQ handler */
+		interrupt_register(dma_int[i]->irq,
+			dw_dma_irq_handler, dma_int[i]);
+		interrupt_enable(dma_int[i]->irq);
+
+	}
+
+	return 0;
+}
+
+#else
 /* this will probably be called at the end of every period copied */
 static void dw_dma_irq_handler(void *data)
 {
@@ -805,63 +985,6 @@ static void dw_dma_irq_handler(void *data)
 	}
 }
 
-static void dw_dma_setup(struct dma *dma)
-{
-	struct dw_drv_plat_data *dp = dma->plat_data.drv_plat_data;
-	int i;
-
-	/* we cannot config DMAC if DMAC has been already enabled by host */
-	if (dw_read(dma, DW_DMA_CFG) != 0)
-		dw_write(dma, DW_DMA_CFG, 0x0);
-
-	/* now check that it's 0 */
-	for (i = DW_DMA_CFG_TRIES; i > 0; i--) {
-		if (dw_read(dma, DW_DMA_CFG) == 0)
-			goto found;
-	}
-	trace_dma_error("eDs");
-	return;
-
-found:
-	for (i = 0; i < DW_MAX_CHAN; i++)
-		dw_read(dma, DW_DMA_CHAN_EN);
-
-#ifdef HAVE_HDDA
-	/* enable HDDA before DMAC */
-	shim_write(SHIM_HMDC, SHIM_HMDC_HDDA_ALLCH);
-#endif
-
-	/* enable the DMA controller */
-	dw_write(dma, DW_DMA_CFG, 1);
-
-	/* mask all interrupts for all 8 channels */
-	dw_write(dma, DW_MASK_TFR, INT_MASK_ALL);
-	dw_write(dma, DW_MASK_BLOCK, INT_MASK_ALL);
-	dw_write(dma, DW_MASK_SRC_TRAN, INT_MASK_ALL);
-	dw_write(dma, DW_MASK_DST_TRAN, INT_MASK_ALL);
-	dw_write(dma, DW_MASK_ERR, INT_MASK_ALL);
-
-#ifdef DW_FIFO_PARTITION
-	/* TODO: we cannot config DMA FIFOs if DMAC has been already */
-	/* allocate FIFO partitions, 128 bytes for each ch */
-	dw_write(dma, DW_FIFO_PART1_LO, 0x100080);
-	dw_write(dma, DW_FIFO_PART1_HI, 0x100080);
-	dw_write(dma, DW_FIFO_PART0_HI, 0x100080);
-	dw_write(dma, DW_FIFO_PART0_LO, 0x100080 | (1 << 26));
-	dw_write(dma, DW_FIFO_PART0_LO, 0x100080);
-#endif
-
-	/* set channel priorities */
-	for (i = 0; i < DW_MAX_CHAN; i++) {
-#if defined CONFIG_BAYTRAIL || defined CONFIG_CHERRYTRAIL
-		dw_write(dma, DW_CTRL_HIGH(i), DW_CTLH_CLASS(dp->chan[i].class));
-#else
-		dw_write(dma, DW_CFG_LOW(i), DW_CFG_CLASS(dp->chan[i].class));
-#endif
-	}
-
-}
-
 static int dw_dma_probe(struct dma *dma)
 {
 	struct dma_pdata *dw_pdata;
@@ -888,6 +1011,7 @@ static int dw_dma_probe(struct dma *dma)
 
 	return 0;
 }
+#endif
 
 const struct dma_ops dw_dma_ops = {
 	.channel_get	= dw_dma_channel_get,
diff --git a/src/include/reef/dma.h b/src/include/reef/dma.h
index 697e2c6..fc298e6 100644
--- a/src/include/reef/dma.h
+++ b/src/include/reef/dma.h
@@ -127,6 +127,12 @@ struct dma {
 	void *private;
 };
 
+struct dma_int {
+	struct dma *dma;
+	uint32_t channel;
+	uint32_t irq;
+};
+
 struct dma *dma_get(int dmac_id);
 
 #define dma_set_drvdata(dma, data) \
We only need to handle host page tables on platforms where the firmware itself programs the DMA host buffer (address/size); on other platforms, the host driver programs these settings and won't pass in page tables.
So add the flag CONFIG_HOST_PTABLE to configure this per platform; on Baytrail and Cherrytrail, CONFIG_HOST_PTABLE needs to be selected.
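For background, the page table passed in by the host packs one 20-bit page frame number per 4 KB host buffer page. A standalone sketch of the unpacking that parse_page_descriptors() performs; treat the layout details as an approximation of the firmware code, not a verbatim copy:

#include <stdint.h>
#include <stdio.h>

/* unpack entry i of a table that stores 20-bit PFNs back to back:
 * entry i starts 20*i bits, i.e. (5*i)/2 bytes, into the table */
static uint32_t page_phy_addr(const uint8_t *page_table, int i)
{
	uint32_t idx = ((i << 2) + i) >> 1;	/* (5 * i) / 2 */
	uint32_t phy_addr = page_table[idx] |
		(page_table[idx + 1] << 8) |
		((uint32_t)page_table[idx + 2] << 16);

	/* odd entries start on the high nibble of their first byte */
	if (i & 1)
		phy_addr <<= 8;
	else
		phy_addr <<= 12;

	return phy_addr & 0xfffff000;
}

int main(void)
{
	/* two packed PFNs, 0x12345 and 0x6789a, in 5 bytes */
	const uint8_t table[] = { 0x45, 0x23, 0xa1, 0x89, 0x67 };

	printf("page 0 at 0x%08x\n", page_phy_addr(table, 0));	/* 0x12345000 */
	printf("page 1 at 0x%08x\n", page_phy_addr(table, 1));	/* 0x6789a000 */
	return 0;
}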
Signed-off-by: Keyon Jie <yang.jie@linux.intel.com>
---
 configure.ac        | 2 ++
 src/ipc/intel-ipc.c | 6 ++++++
 2 files changed, 8 insertions(+)
diff --git a/configure.ac b/configure.ac
index 6d67220..86d19cb 100644
--- a/configure.ac
+++ b/configure.ac
@@ -81,6 +81,7 @@ case "$with_platform" in
 		AC_SUBST(XTENSA_CORE)
 
 		AC_DEFINE([CONFIG_BAYTRAIL], [1], [Configure for Baytrail])
+		AC_DEFINE([CONFIG_HOST_PTABLE], [1], [Configure handling of host page table])
 	;;
 	cherrytrail*)
@@ -97,6 +98,7 @@ case "$with_platform" in
 		AC_SUBST(XTENSA_CORE)
 
 		AC_DEFINE([CONFIG_CHERRYTRAIL], [1], [Configure for Cherrytrail])
+		AC_DEFINE([CONFIG_HOST_PTABLE], [1], [Configure handling of host page table])
 	;;
 	*)
 		AC_MSG_ERROR([Host platform not specified])
diff --git a/src/ipc/intel-ipc.c b/src/ipc/intel-ipc.c
index 4c310b6..e39951a 100644
--- a/src/ipc/intel-ipc.c
+++ b/src/ipc/intel-ipc.c
@@ -82,6 +82,7 @@ static inline struct sof_ipc_hdr *mailbox_validate(void)
 	return hdr;
 }
 
+#ifdef CONFIG_HOST_PTABLE
 static void dma_complete(void *data, uint32_t type, struct dma_sg_elem *next)
 {
 	struct intel_ipc_data *iipc = (struct intel_ipc_data *)data;
@@ -219,6 +220,7 @@ static int parse_page_descriptors(struct intel_ipc_data *iipc,
 
 	return 0;
 }
+#endif
 
 /*
  * Stream IPC Operations.
@@ -227,7 +229,9 @@ static int parse_page_descriptors(struct intel_ipc_data *iipc,
 /* allocate a new stream */
 static int ipc_stream_pcm_params(uint32_t stream)
 {
+#ifdef CONFIG_HOST_PTABLE
 	struct intel_ipc_data *iipc = ipc_get_drvdata(_ipc);
+#endif
 	struct sof_ipc_pcm_params *pcm_params = _ipc->comp_data;
 	struct sof_ipc_pcm_params_reply reply;
 	struct ipc_comp_dev *pcm_dev;
@@ -255,6 +259,7 @@ static int ipc_stream_pcm_params(uint32_t stream)
 	cd = pcm_dev->cd;
 	cd->params = pcm_params->params;
 
+#ifdef CONFIG_HOST_PTABLE
 	/* use DMA to read in compressed page table ringbuffer from host */
 	err = get_page_descriptors(iipc, &pcm_params->params.buffer);
 	if (err < 0) {
@@ -269,6 +274,7 @@ static int ipc_stream_pcm_params(uint32_t stream)
 		trace_ipc_error("eAP");
 		goto error;
 	}
+#endif
 
 	/* configure pipeline audio params */
 	err = pipeline_params(pcm_dev->cd->pipeline, pcm_dev->cd, pcm_params);
On Wed, 2017-12-06 at 21:16 +0800, Keyon Jie wrote:
> We only need to handle host page tables on platforms where the firmware itself programs the DMA host buffer (address/size); on other platforms, the host driver programs these settings and won't pass in page tables.
>
> So add the flag CONFIG_HOST_PTABLE to configure this per platform; on Baytrail and Cherrytrail, CONFIG_HOST_PTABLE needs to be selected.
>
> Signed-off-by: Keyon Jie <yang.jie@linux.intel.com>
Applied 1/3 and 3/3. 2/3 will be for 1.1.
Liam