Fix locking in DMA trace. Also run the DMA trace as delayed work so we don't block callers. Refactor the API so that the DMAC and channel are pre-allocated.
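
With this change, callers set up the copier once and reuse it for every
transfer instead of grabbing DMAC0 and a channel on each call. A minimal
sketch of the new calling sequence (host_sg, local_ptr and size here are
illustrative caller state, not part of this patch):

	struct dma_copy dc;
	int ret;

	/* pre-allocate the DMAC and channel once at init time */
	ret = dma_copy_new(&dc, PLATFORM_TRACE_DMAC);
	if (ret < 0)
		return ret;

	/* reuse the context for each transfer; returns the new host offset */
	ret = dma_copy_to_host(&dc, &host_sg, 0, local_ptr, size);

	/* release the channel when the copier is no longer needed */
	dma_copy_free(&dc);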
Signed-off-by: Liam Girdwood <liam.r.girdwood@linux.intel.com>
---
 src/audio/dma-trace.c                             | 101 ++++++++++------------
 src/include/reef/alloc.h                          |   4 +-
 src/include/reef/audio/dma-trace.h                |   5 +-
 src/include/reef/dma.h                            |  22 ++++-
 src/ipc/dma-copy.c                                |  95 ++++++++++----------
 src/ipc/intel-ipc.c                               |   4 +-
 src/ipc/ipc.c                                     |   1 -
 src/lib/alloc.c                                   |  12 +--
 src/lib/trace.c                                   |   6 +-
 src/platform/baytrail/include/platform/platform.h |   3 +
 10 files changed, 131 insertions(+), 122 deletions(-)
diff --git a/src/audio/dma-trace.c b/src/audio/dma-trace.c
index 00a29b4..9704733 100644
--- a/src/audio/dma-trace.c
+++ b/src/audio/dma-trace.c
@@ -41,51 +41,22 @@
 
 static struct dma_trace_data *trace_data = NULL;
 
-static int dma_trace_new_buffer(struct dma_trace_data *d, uint32_t buffer_size)
-{
-	struct dma_trace_buf *buffer = &d->dmatb;
-
-	trace_buffer("nlb");
-
-	/* validate request */
-	if (buffer_size == 0 || buffer_size > HEAP_BUFFER_SIZE) {
-		trace_buffer_error("ebg");
-		trace_value(buffer_size);
-		return -ENOMEM;
-	}
-
-	/* allocate new buffer */
-	buffer->addr = rballoc(RZONE_RUNTIME, RFLAGS_NONE, buffer_size);
-	if (buffer->addr == NULL) {
-		trace_buffer_error("ebm");
-		return -ENOMEM;
-	}
-
-	bzero(buffer->addr, buffer_size);
-
-	buffer->size = buffer_size;
-	buffer->w_ptr = buffer->r_ptr = buffer->addr;
-	buffer->end_addr = buffer->addr + buffer->size;
-	buffer->avail = 0;
-
-	return 0;
-}
-
-static void trace_send(struct dma_trace_data *d)
+static uint64_t trace_work(void *data, uint64_t delay)
 {
+	struct dma_trace_data *d = (struct dma_trace_data *)data;
 	struct dma_trace_buf *buffer = &d->dmatb;
 	struct dma_sg_config *config = &d->config;
 	unsigned long flags;
 	int32_t offset = 0;
 	uint32_t avail = buffer->avail;
-	uint32_t bytes_copied = avail;
+	uint32_t bytes_copied = 0;
 	uint32_t size;
 	uint32_t hsize;
 	uint32_t lsize;
 
 	/* any data to copy ? */
 	if (avail == 0)
-		return;
+		return DMA_TRACE_US;
 
 	/* copy to host in sections if we wrap */
 	while (avail > 0) {
@@ -106,12 +77,15 @@ static void trace_send(struct dma_trace_data *d)
 		else
 			size = lsize;
 
+		/* writeback trace data */
+		dcache_writeback_region((void*)buffer->r_ptr, size);
+
 		/* copy this section to host */
-		offset = dma_copy_to_host(config, d->host_offset,
+		offset = dma_copy_to_host(&d->dc, config, d->host_offset,
 			buffer->r_ptr, size);
 		if (offset < 0) {
 			trace_buffer_error("ebb");
-			return;
+			goto out;
 		}
 
 		/* update host pointer and check for wrap */
@@ -125,18 +99,13 @@ static void trace_send(struct dma_trace_data *d)
 			buffer->r_ptr = buffer->addr;
 
 		avail -= size;
+		bytes_copied += size;
 	}
 
+out:
 	spin_lock_irq(&d->lock, flags);
 	buffer->avail -= bytes_copied;
 	spin_unlock_irq(&d->lock, flags);
-}
-
-static uint64_t trace_work(void *data, uint64_t delay)
-{
-	struct dma_trace_data *d = (struct dma_trace_data *)data;
-
-	trace_send(d);
 
 	/* reschedule the trace copying work */
 	return DMA_TRACE_US;
@@ -144,25 +113,41 @@ static uint64_t trace_work(void *data, uint64_t delay)
 int dma_trace_init(struct dma_trace_data *d)
 {
-	int err;
+	struct dma_trace_buf *buffer = &d->dmatb;
+	int ret;
 
 	trace_buffer("dtn");
 
-	/* init buffer elems */
-	list_init(&d->config.elem_list);
+	/* allocate new buffer */
+	buffer->addr = rballoc(RZONE_RUNTIME, RFLAGS_NONE, DMA_TRACE_LOCAL_SIZE);
+	if (buffer->addr == NULL) {
+		trace_buffer_error("ebm");
+		return -ENOMEM;
+	}
 
-	/* allocate local DMA buffer */
-	err = dma_trace_new_buffer(d, DMA_TRACE_LOCAL_SIZE);
-	if (err < 0) {
-		trace_buffer_error("ePb");
-		return err;
+	/* init DMA copy context */
+	ret = dma_copy_new(&d->dc, PLATFORM_TRACE_DMAC);
+	if (ret < 0) {
+		trace_buffer_error("edm");
+		rfree(buffer->addr);
+		return ret;
 	}
 
+	bzero(buffer->addr, DMA_TRACE_LOCAL_SIZE);
+
+	/* initialise the DMA buffer */
+	buffer->size = DMA_TRACE_LOCAL_SIZE;
+	buffer->w_ptr = buffer->r_ptr = buffer->addr;
+	buffer->end_addr = buffer->addr + buffer->size;
+	buffer->avail = 0;
 	d->host_offset = 0;
-	trace_data = d;
+	d->enabled = 0;
 
+	list_init(&d->config.elem_list);
 	work_init(&d->dmat_work, trace_work, d, WORK_ASYNC);
 	spinlock_init(&d->lock);
+	trace_data = d;
+
 	return 0;
 }
 
@@ -183,10 +168,18 @@ int dma_trace_host_buffer(struct dma_trace_data *d, struct dma_sg_elem *elem,
 	return 0;
 }
 
-void dma_trace_config_ready(struct dma_trace_data *d)
+int dma_trace_enable(struct dma_trace_data *d)
 {
+	/* validate DMA context */
+	if (d->dc.dmac == NULL || d->dc.chan < 0) {
+		trace_buffer_error("eem");
+		return -ENODEV;
+	}
+
+	/* TODO: fix crash when enabled */
+	//d->enabled = 1;
 	work_schedule_default(&d->dmat_work, DMA_TRACE_US);
-	d->ready = 1;
+	return 0;
 }
 
 void dtrace_event(const char *e, uint32_t length)
@@ -226,6 +219,6 @@ void dtrace_event(const char *e, uint32_t length)
 	spin_unlock_irq(&trace_data->lock, flags);
 
 	/* schedule copy now if buffer > 50% full */
-	if (trace_data->ready && buffer->avail >= (DMA_TRACE_LOCAL_SIZE / 2))
+	if (trace_data->enabled && buffer->avail >= (DMA_TRACE_LOCAL_SIZE / 2))
 		work_reschedule_default(&trace_data->dmat_work, 100);
 }
diff --git a/src/include/reef/alloc.h b/src/include/reef/alloc.h
index 4e8a630..982af90 100644
--- a/src/include/reef/alloc.h
+++ b/src/include/reef/alloc.h
@@ -86,8 +86,8 @@ int rstrlen(const char *s);
 
 /* Heap save/restore contents and context for PM D0/D3 events */
 uint32_t mm_pm_context_size(void);
-int mm_pm_context_save(struct dma_sg_config *sg);
-int mm_pm_context_restore(struct dma_sg_config *sg);
+int mm_pm_context_save(struct dma_copy *dc, struct dma_sg_config *sg);
+int mm_pm_context_restore(struct dma_copy *dc, struct dma_sg_config *sg);
 
 /* heap initialisation */
 void init_heap(struct reef *reef);
diff --git a/src/include/reef/audio/dma-trace.h b/src/include/reef/audio/dma-trace.h
index 20f2e18..83db484 100644
--- a/src/include/reef/audio/dma-trace.h
+++ b/src/include/reef/audio/dma-trace.h
@@ -55,17 +55,18 @@ struct dma_trace_buf {
 struct dma_trace_data {
 	struct dma_sg_config config;
 	struct dma_trace_buf dmatb;
+	struct dma_copy dc;
 	int32_t host_offset;
 	uint32_t host_size;
 	struct work dmat_work;
-	uint32_t ready;
+	uint32_t enabled;
 	spinlock_t lock;
 };
 
 int dma_trace_init(struct dma_trace_data *d);
 int dma_trace_host_buffer(struct dma_trace_data *d, struct dma_sg_elem *elem,
 	uint32_t host_size);
-void dma_trace_config_ready(struct dma_trace_data *d);
+int dma_trace_enable(struct dma_trace_data *d);
 
 void dtrace_event(const char *e, uint32_t size);
diff --git a/src/include/reef/dma.h b/src/include/reef/dma.h
index 881332f..33d1c9a 100644
--- a/src/include/reef/dma.h
+++ b/src/include/reef/dma.h
@@ -36,6 +36,7 @@
 #include <reef/list.h>
 #include <reef/lock.h>
 #include <reef/reef.h>
+#include <reef/wait.h>
 
 /* DMA directions */
 #define DMA_DIR_MEM_TO_MEM	0 /* local memcpy */
@@ -228,12 +229,29 @@ static inline uint32_t dma_sg_get_size(struct dma_sg_config *sg)
 	return size;
 }
 
+/* generic DMA DSP <-> Host copier */
+
+struct dma_copy {
+	int chan;
+	struct dma *dmac;
+	completion_t complete;
+};
+
+/* init dma copy context */
+int dma_copy_new(struct dma_copy *dc, int dmac);
+
+/* free dma copy context resources */
+static inline void dma_copy_free(struct dma_copy *dc)
+{
+	dma_channel_put(dc->dmac, dc->chan);
+}
+
 /* DMA copy data from host to DSP */
-int dma_copy_from_host(struct dma_sg_config *host_sg,
+int dma_copy_from_host(struct dma_copy *dc, struct dma_sg_config *host_sg,
 	int32_t host_offset, void *local_ptr, int32_t size);
 
 /* DMA copy data from DSP to host */
-int dma_copy_to_host(struct dma_sg_config *host_sg,
+int dma_copy_to_host(struct dma_copy *dc, struct dma_sg_config *host_sg,
 	int32_t host_offset, void *local_ptr, int32_t size);
 
 #endif
diff --git a/src/ipc/dma-copy.c b/src/ipc/dma-copy.c
index b345ac9..75dd3bd 100644
--- a/src/ipc/dma-copy.c
+++ b/src/ipc/dma-copy.c
@@ -38,6 +38,11 @@
 #include <reef/wait.h>
 #include <platform/dma.h>
 
+/* tracing */
+#define trace_dma(__e)	trace_event(TRACE_CLASS_DMA, __e)
+#define trace_dma_error(__e)	trace_error(TRACE_CLASS_DMA, __e)
+#define tracev_dma(__e)	tracev_event(TRACE_CLASS_DMA, __e)
+
 static struct dma_sg_elem *sg_get_elem_at(struct dma_sg_config *host_sg,
 	int32_t *offset)
 {
@@ -60,7 +65,7 @@ static struct dma_sg_elem *sg_get_elem_at(struct dma_sg_config *host_sg,
 	}
 
 	/* host offset in beyond end of SG buffer */
-	//trace_mem_error("eMs");
+	trace_dma_error("ex0");
 	return NULL;
 }
 
@@ -72,27 +77,17 @@ static void dma_complete(void *data, uint32_t type, struct dma_sg_elem *next)
 	wait_completed(comp);
 }
 
-int dma_copy_to_host(struct dma_sg_config *host_sg, int32_t host_offset,
-	void *local_ptr, int32_t size)
+int dma_copy_to_host(struct dma_copy *dc, struct dma_sg_config *host_sg,
+	int32_t host_offset, void *local_ptr, int32_t size)
 {
 	struct dma_sg_config config;
 	struct dma_sg_elem *host_sg_elem;
 	struct dma_sg_elem local_sg_elem;
-	struct dma *dma = dma_get(DMA_ID_DMAC0);
-	completion_t complete;
 	int32_t err;
 	int32_t offset = host_offset;
-	int32_t chan;
 
-	if (dma == NULL)
-		return -ENODEV;
-
-	/* get DMA channel from DMAC0 */
-	chan = dma_channel_get(dma);
-	if (chan < 0) {
-		//trace_ipc_error("ePC");
-		return chan;
-	}
+	if (size <= 0)
+		return 0;
 
 	/* find host element with host_offset */
 	host_sg_elem = sg_get_elem_at(host_sg, &offset);
@@ -100,7 +95,6 @@ int dma_copy_to_host(struct dma_sg_config *host_sg, int32_t host_offset,
 		return -EINVAL;
 
 	/* set up DMA configuration */
-	complete.timeout = 100;	/* wait 100 usecs for DMA to finish */
 	config.direction = DMA_DIR_LMEM_TO_HMEM;
 	config.src_width = sizeof(uint32_t);
 	config.dest_width = sizeof(uint32_t);
@@ -113,21 +107,18 @@ int dma_copy_to_host(struct dma_sg_config *host_sg, int32_t host_offset,
 	local_sg_elem.size = HOST_PAGE_SIZE - offset;
 	list_item_prepend(&local_sg_elem.list, &config.elem_list);
 
-	dma_set_cb(dma, chan, DMA_IRQ_TYPE_LLIST, dma_complete, &complete);
-
 	/* transfer max PAGE size at a time to SG buffer */
 	while (size > 0) {
 
 		/* start the DMA */
-		wait_init(&complete);
-		dma_set_config(dma, chan, &config);
-		dma_start(dma, chan);
+		wait_init(&dc->complete);
+		dma_set_config(dc->dmac, dc->chan, &config);
+		dma_start(dc->dmac, dc->chan);
 
 		/* wait for DMA to complete */
-		err = wait_for_completion_timeout(&complete);
+		err = wait_for_completion_timeout(&dc->complete);
 		if (err < 0) {
-			//trace_comp_error("eAp");
-			dma_channel_put(dma, chan);
+			trace_dma_error("ex1");
 			return -EIO;
 		}
 
@@ -150,31 +141,20 @@ int dma_copy_to_host(struct dma_sg_config *host_sg, int32_t host_offset,
 	}
 
 	/* new host offset in SG buffer */
-	dma_channel_put(dma, chan);
 	return host_offset;
 }
 
-int dma_copy_from_host(struct dma_sg_config *host_sg, int32_t host_offset,
-	void *local_ptr, int32_t size)
+int dma_copy_from_host(struct dma_copy *dc, struct dma_sg_config *host_sg,
+	int32_t host_offset, void *local_ptr, int32_t size)
 {
 	struct dma_sg_config config;
 	struct dma_sg_elem *host_sg_elem;
 	struct dma_sg_elem local_sg_elem;
-	struct dma *dma = dma_get(DMA_ID_DMAC0);
-	completion_t complete;
 	int32_t err;
 	int32_t offset = host_offset;
-	int32_t chan;
 
-	if (dma == NULL)
-		return -ENODEV;
-
-	/* get DMA channel from DMAC0 */
-	chan = dma_channel_get(dma);
-	if (chan < 0) {
-		//trace_ipc_error("ePC");
-		return chan;
-	}
+	if (size <= 0)
+		return 0;
 
 	/* find host element with host_offset */
 	host_sg_elem = sg_get_elem_at(host_sg, &offset);
@@ -182,7 +162,6 @@ int dma_copy_from_host(struct dma_sg_config *host_sg, int32_t host_offset,
 		return -EINVAL;
 
 	/* set up DMA configuration */
-	complete.timeout = 100;	/* wait 100 usecs for DMA to finish */
 	config.direction = DMA_DIR_HMEM_TO_LMEM;
 	config.src_width = sizeof(uint32_t);
 	config.dest_width = sizeof(uint32_t);
@@ -195,21 +174,18 @@ int dma_copy_from_host(struct dma_sg_config *host_sg, int32_t host_offset,
 	local_sg_elem.size = HOST_PAGE_SIZE - offset;
 	list_item_prepend(&local_sg_elem.list, &config.elem_list);
 
-	dma_set_cb(dma, chan, DMA_IRQ_TYPE_LLIST, dma_complete, &complete);
-
 	/* transfer max PAGE size at a time to SG buffer */
 	while (size > 0) {
 
 		/* start the DMA */
-		wait_init(&complete);
-		dma_set_config(dma, chan, &config);
-		dma_start(dma, chan);
+		wait_init(&dc->complete);
+		dma_set_config(dc->dmac, dc->chan, &config);
+		dma_start(dc->dmac, dc->chan);
 
 		/* wait for DMA to complete */
-		err = wait_for_completion_timeout(&complete);
+		err = wait_for_completion_timeout(&dc->complete);
 		if (err < 0) {
-			//trace_comp_error("eAp");
-			dma_channel_put(dma, chan);
+			trace_dma_error("ex2");
 			return -EIO;
 		}
 
@@ -232,6 +208,27 @@ int dma_copy_from_host(struct dma_sg_config *host_sg, int32_t host_offset,
 	}
 
 	/* new host offset in SG buffer */
-	dma_channel_put(dma, chan);
 	return host_offset;
 }
+
+int dma_copy_new(struct dma_copy *dc, int dmac)
+{
+	dc->dmac = dma_get(dmac);
+	if (dc->dmac == NULL) {
+		trace_dma_error("ec0");
+		return -ENODEV;
+	}
+
+	/* get DMA channel from DMAC0 */
+	dc->chan = dma_channel_get(dc->dmac);
+	if (dc->chan < 0) {
+		trace_dma_error("ec1");
+		return dc->chan;
+	}
+
+	dc->complete.timeout = 100;	/* wait 100 usecs for DMA to finish */
+	dma_set_cb(dc->dmac, dc->chan, DMA_IRQ_TYPE_LLIST, dma_complete,
+		&dc->complete);
+	return 0;
+}
+
diff --git a/src/ipc/intel-ipc.c b/src/ipc/intel-ipc.c
index 4b56993..9b47ca9 100644
--- a/src/ipc/intel-ipc.c
+++ b/src/ipc/intel-ipc.c
@@ -629,7 +629,9 @@ static int ipc_dma_trace_config(uint32_t header)
trace_ipc("DAp");
- dma_trace_config_ready(&_ipc->dmat); + err = dma_trace_enable(&_ipc->dmat); + if (err < 0) + goto error;
/* write component values to the outbox */ reply.hdr.size = sizeof(reply); diff --git a/src/ipc/ipc.c b/src/ipc/ipc.c index 05de838..6c40d10 100644 --- a/src/ipc/ipc.c +++ b/src/ipc/ipc.c @@ -350,7 +350,6 @@ int ipc_init(struct reef *reef) /* init ipc data */ reef->ipc = rzalloc(RZONE_SYS, RFLAGS_NONE, sizeof(*reef->ipc)); reef->ipc->comp_data = rzalloc(RZONE_SYS, RFLAGS_NONE, SOF_IPC_MSG_MAX_SIZE); - reef->ipc->dmat.ready = 0;
list_init(&reef->ipc->comp_list);
diff --git a/src/lib/alloc.c b/src/lib/alloc.c
index 0d9b07c..63f92de 100644
--- a/src/lib/alloc.c
+++ b/src/lib/alloc.c
@@ -573,7 +573,7 @@ uint32_t mm_pm_context_size(void)
  * must be disabled before calling this functions. No allocations are permitted after
  * calling this and before calling restore.
  */
-int mm_pm_context_save(struct dma_sg_config *sg)
+int mm_pm_context_save(struct dma_copy *dc, struct dma_sg_config *sg)
 {
 	uint32_t used;
 	int32_t offset = 0;
@@ -585,13 +585,13 @@ int mm_pm_context_save(struct dma_sg_config *sg)
 		return -EINVAL;
 
 	/* copy memory maps to SG */
-	ret = dma_copy_to_host(sg, offset,
+	ret = dma_copy_to_host(dc, sg, offset,
 		(void *)&memmap, sizeof(memmap));
 	if (ret < 0)
 		return ret;
 
 	/* copy system memory contents to SG */
-	ret = dma_copy_to_host(sg, offset + ret,
+	ret = dma_copy_to_host(dc, sg, offset + ret,
 		(void *)memmap.system.heap, (int32_t)(memmap.system.size));
 	if (ret < 0)
 		return ret;
@@ -611,19 +611,19 @@ int mm_pm_context_save(struct dma_sg_config *sg)
  * Restore the DSP memories to modules abd the system. This must be called immediately
  * after booting before any pipeline work.
  */
-int mm_pm_context_restore(struct dma_sg_config *sg)
+int mm_pm_context_restore(struct dma_copy *dc, struct dma_sg_config *sg)
 {
 	int32_t offset = 0;
 	int32_t ret;
 
 	/* copy memory maps from SG */
-	ret = dma_copy_from_host(sg, offset,
+	ret = dma_copy_from_host(dc, sg, offset,
 		(void *)&memmap, sizeof(memmap));
 	if (ret < 0)
 		return ret;
 
 	/* copy system memory contents from SG */
-	ret = dma_copy_to_host(sg, offset + ret,
+	ret = dma_copy_to_host(dc, sg, offset + ret,
 		(void *)memmap.system.heap, (int32_t)(memmap.system.size));
 	if (ret < 0)
 		return ret;
diff --git a/src/lib/trace.c b/src/lib/trace.c
index bdd50ae..6f707c3 100644
--- a/src/lib/trace.c
+++ b/src/lib/trace.c
@@ -77,15 +77,11 @@ void _trace_error(uint32_t event)
 void _trace_event(uint32_t event)
 {
-	volatile uint64_t dt[2];
-	uint32_t et = (event & 0xff000000);
+	uint64_t dt[2];
 
 	if (!trace.enable)
 		return;
 
-	if (et == TRACE_CLASS_DMA)
-		return;
-
 	dt[0] = platform_timer_get(platform_timer);
 	dt[1] = event;
 	dtrace_event((const char*)dt, sizeof(uint64_t) * 2);
diff --git a/src/platform/baytrail/include/platform/platform.h b/src/platform/baytrail/include/platform/platform.h
index ff3a117..4d63f7b 100644
--- a/src/platform/baytrail/include/platform/platform.h
+++ b/src/platform/baytrail/include/platform/platform.h
@@ -82,6 +82,9 @@ struct reef;
 /* the interval of DMA trace copying */
 #define DMA_TRACE_US	500000
 
+/* DMAC used for trace DMA */
+#define PLATFORM_TRACE_DMAC	DMA_ID_DMAC0
+
 /* Platform defined panic code */
 #define platform_panic(__x) \
 	shim_write(SHIM_IPCXL, ((shim_read(SHIM_IPCXL) & 0xc0000000) |\
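
For reference, a sketch of the DMA trace flow after this patch: dma_trace_init()
now does all allocation up front (local buffer, DMAC and channel) and
dma_trace_enable() only validates the pre-allocated context and schedules the
delayed work, so the IPC caller never blocks on the copy. The call order below
is a minimal sketch; the _ipc context, elem and error handling are illustrative
only:

	/* boot: allocate local buffer, DMAC and channel */
	ret = dma_trace_init(&_ipc->dmat);
	if (ret < 0)
		return ret;

	/* IPC: host passes its SG buffer pages */
	ret = dma_trace_host_buffer(&_ipc->dmat, elem, host_size);

	/* IPC: start the delayed work; trace_work() then runs every
	 * DMA_TRACE_US, copies in the background and reschedules itself */
	ret = dma_trace_enable(&_ipc->dmat);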