[Sound-open-firmware] [PATCH] trace: dma: Add atomic and nowait DMA tracing support.
Add support for DMA trace to run in atomic and IRQ contexts. Currently the DMA trace would sleep between DMA copies and enter an atomic state when inserting new trace data into the trace buffer.
This patch adds new DMA _nowait() APIs that don't sleep, and _atomic() trace logging APIs that can safely be called from atomic context.
Signed-off-by: Liam Girdwood <liam.r.girdwood@linux.intel.com>
---
 src/audio/dma-trace.c              | 144 ++++++++++++++++++++--------------
 src/include/reef/audio/dma-trace.h |   1 +
 src/include/reef/dma.h             |   4 +
 src/include/reef/trace.h           |  12 +++
 src/ipc/dma-copy.c                 | 157 ++++++++++++++++++++++++++++++++---
 src/lib/trace.c                    |  56 ++++++++++++-
 6 files changed, 301 insertions(+), 73 deletions(-)
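Note on the intended calling convention (the caller below is hypothetical, not part of the patch): dma_copy_to_host() loops internally and may sleep on each block until all bytes are transferred, while dma_copy_to_host_nowait() programs a single sub-page transfer and returns immediately. An atomic-context caller must therefore treat the return value as the number of bytes actually copied and retry later for the remainder:

	/* sketch only: dc and cfg set up as in trace_work() */
	static int32_t trace_copy_atomic(struct dma_copy *dc,
		struct dma_sg_config *cfg, int32_t host_offset,
		void *ptr, int32_t bytes)
	{
		/* never sleeps; copies at most one host page */
		int32_t copied = dma_copy_to_host_nowait(dc, cfg,
			host_offset, ptr, bytes);

		if (copied < 0)
			return copied;	/* dma_set_config()/dma_start() error */

		/* may be < bytes: the caller advances its read pointer by
		 * 'copied' and leaves the rest for the next work cycle */
		return copied;
	}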
diff --git a/src/audio/dma-trace.c b/src/audio/dma-trace.c
index 9704733..b0267fa 100644
--- a/src/audio/dma-trace.c
+++ b/src/audio/dma-trace.c
@@ -47,9 +47,7 @@ static uint64_t trace_work(void *data, uint64_t delay)
 	struct dma_trace_buf *buffer = &d->dmatb;
 	struct dma_sg_config *config = &d->config;
 	unsigned long flags;
-	int32_t offset = 0;
 	uint32_t avail = buffer->avail;
-	uint32_t bytes_copied = 0;
 	uint32_t size;
 	uint32_t hsize;
 	uint32_t lsize;
@@ -58,53 +56,51 @@ static uint64_t trace_work(void *data, uint64_t delay)
 	if (avail == 0)
 		return DMA_TRACE_US;

+	/* make sure we don't write more than the buffer */
+	if (avail > DMA_TRACE_LOCAL_SIZE)
+		avail = DMA_TRACE_LOCAL_SIZE;
+
 	/* copy to host in sections if we wrap */
-	while (avail > 0) {
-
-		lsize = hsize = avail;
-
-		/* host buffer wrap ? */
-		if (d->host_offset + buffer->avail > d->host_size)
-			hsize = d->host_offset + buffer->avail - d->host_size;
-
-		/* local buffer wrap ? */
-		if (buffer->r_ptr > buffer->w_ptr)
-			lsize = buffer->end_addr - buffer->r_ptr;
-
-		/* get smallest size */
-		if (hsize < lsize)
-			size = hsize;
-		else
-			size = lsize;
-
-		/* writeback trace data */
-		dcache_writeback_region((void*)buffer->r_ptr, size);
-
-		/* copy this section to host */
-		offset = dma_copy_to_host(&d->dc, config, d->host_offset,
-			buffer->r_ptr, size);
-		if (offset < 0) {
-			trace_buffer_error("ebb");
-			goto out;
-		}
-
-		/* update host pointer and check for wrap */
-		d->host_offset += size;
-		if (d->host_offset + size >= d->host_size)
-			d->host_offset = 0;
-
-		/* update local pointer and check for wrap */
-		buffer->r_ptr += size;
-		if (buffer->r_ptr >= buffer->end_addr)
-			buffer->r_ptr = buffer->addr;
-
-		avail -= size;
-		bytes_copied += size;
+	lsize = hsize = avail;
+
+	/* host buffer wrap ? */
+	if (d->host_offset + avail > d->host_size)
+		hsize = d->host_size - d->host_offset;
+
+	/* local buffer wrap ? */
+	if (buffer->r_ptr + avail > buffer->end_addr)
+		lsize = buffer->end_addr - buffer->r_ptr;
+
+	/* get smallest size */
+	if (hsize < lsize)
+		size = hsize;
+	else
+		size = lsize;
+
+	/* writeback trace data */
+	dcache_writeback_region((void*)buffer->r_ptr, size);
+
+	/* copy this section to host */
+	size = dma_copy_to_host_nowait(&d->dc, config, d->host_offset,
+		buffer->r_ptr, size);
+	if (size < 0) {
+		trace_buffer_error("ebb");
+		goto out;
 	}

+	/* update host pointer and check for wrap */
+	d->host_offset += size;
+	if (d->host_offset + size >= d->host_size)
+		d->host_offset = 0;
+
+	/* update local pointer and check for wrap */
+	buffer->r_ptr += size;
+	if (buffer->r_ptr >= buffer->end_addr)
+		buffer->r_ptr = buffer->addr;
+
 out:
 	spin_lock_irq(&d->lock, flags);
-	buffer->avail -= bytes_copied;
+	buffer->avail -= size;
 	spin_unlock_irq(&d->lock, flags);
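The single-pass copy above moves min(bytes before the host wrap, bytes before the local wrap) per work cycle; any remainder is picked up on the next run. A standalone restatement of that size calculation (illustrative helper, not part of the patch):

	static uint32_t copy_span(uint32_t avail, uint32_t host_offset,
		uint32_t host_size, uint32_t local_offset, uint32_t local_size)
	{
		uint32_t hsize = avail;
		uint32_t lsize = avail;

		/* bytes left before the host buffer wraps */
		if (host_offset + avail > host_size)
			hsize = host_size - host_offset;

		/* bytes left before the local buffer wraps */
		if (local_offset + avail > local_size)
			lsize = local_size - local_offset;

		/* e.g. avail = 600 with 100 bytes before the host wrap and
		 * 300 before the local wrap -> copy 100 this cycle */
		return hsize < lsize ? hsize : lsize;
	}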
 	/* reschedule the trace copying work */
@@ -172,32 +168,28 @@ int dma_trace_enable(struct dma_trace_data *d)
 {
 	/* validate DMA context */
 	if (d->dc.dmac == NULL || d->dc.chan < 0) {
-		trace_buffer_error("eem");
+		trace_error_atomic(TRACE_CLASS_BUFFER, "eem");
 		return -ENODEV;
 	}

 	/* TODO: fix crash when enabled */
-	//d->enabled = 1;
+	d->enabled = 1;
 	work_schedule_default(&d->dmat_work, DMA_TRACE_US);
 	return 0;
 }

-void dtrace_event(const char *e, uint32_t length)
+static void dtrace_add_event(const char *e, uint32_t length)
 {
-	struct dma_trace_buf *buffer = NULL;
-	int margin = 0;
-	unsigned long flags;
+	struct dma_trace_buf *buffer = &trace_data->dmatb;
+	int margin;

-	if (trace_data == NULL || length == 0)
-		return;
+	margin = buffer->end_addr - buffer->w_ptr;

-	buffer = &trace_data->dmatb;
-	if (buffer == NULL)
+	/* validate */
+	if (margin <= 0) {
+		trace_buffer_error("emm");
 		return;
-
-	spin_lock_irq(&trace_data->lock, flags);
-
-	margin = buffer->end_addr - buffer->w_ptr;
+	}

 	/* check for buffer wrap */
 	if (margin > length) {
@@ -216,9 +208,45 @@ void dtrace_event(const char *e, uint32_t length)
 	}

 	buffer->avail += length;
+}
+
+void dtrace_event(const char *e, uint32_t length)
+{
+	struct dma_trace_buf *buffer = NULL;
+	unsigned long flags;
+
+	if (trace_data == NULL || length == 0)
+		return;
+
+	if (!trace_data->enabled)
+		return;
+
+	buffer = &trace_data->dmatb;
+	if (buffer == NULL)
+		return;
+
+	spin_lock_irq(&trace_data->lock, flags);
+	dtrace_add_event(e, length);
 	spin_unlock_irq(&trace_data->lock, flags);

 	/* schedule copy now if buffer > 50% full */
 	if (trace_data->enabled && buffer->avail >= (DMA_TRACE_LOCAL_SIZE / 2))
 		work_reschedule_default(&trace_data->dmat_work, 100);
 }
+
+void dtrace_event_atomic(const char *e, uint32_t length)
+{
+	struct dma_trace_buf *buffer = NULL;
+
+	if (trace_data == NULL || length == 0)
+		return;
+
+	if (!trace_data->enabled)
+		return;
+
+	buffer = &trace_data->dmatb;
+	if (buffer == NULL)
+		return;
+
+	dtrace_add_event(e, length);
+}
diff --git a/src/include/reef/audio/dma-trace.h b/src/include/reef/audio/dma-trace.h
index 83db484..12d0956 100644
--- a/src/include/reef/audio/dma-trace.h
+++ b/src/include/reef/audio/dma-trace.h
@@ -69,5 +69,6 @@ int dma_trace_host_buffer(struct dma_trace_data *d, struct dma_sg_elem *elem,
 int dma_trace_enable(struct dma_trace_data *d);
 void dtrace_event(const char *e, uint32_t size);
+void dtrace_event_atomic(const char *e, uint32_t length);

 #endif
diff --git a/src/include/reef/dma.h b/src/include/reef/dma.h
index 33d1c9a..8dd4adf 100644
--- a/src/include/reef/dma.h
+++ b/src/include/reef/dma.h
@@ -249,9 +249,13 @@ static inline void dma_copy_free(struct dma_copy *dc)

 /* DMA copy data from host to DSP */
 int dma_copy_from_host(struct dma_copy *dc, struct dma_sg_config *host_sg,
 	int32_t host_offset, void *local_ptr, int32_t size);
+int dma_copy_from_host_nowait(struct dma_copy *dc, struct dma_sg_config *host_sg,
+	int32_t host_offset, void *local_ptr, int32_t size);

 /* DMA copy data from DSP to host */
 int dma_copy_to_host(struct dma_copy *dc, struct dma_sg_config *host_sg,
 	int32_t host_offset, void *local_ptr, int32_t size);
+int dma_copy_to_host_nowait(struct dma_copy *dc, struct dma_sg_config *host_sg,
+	int32_t host_offset, void *local_ptr, int32_t size);

 #endif
diff --git a/src/include/reef/trace.h b/src/include/reef/trace.h
index cdd8158..17dff5c 100644
--- a/src/include/reef/trace.h
+++ b/src/include/reef/trace.h
@@ -95,6 +95,8 @@

 void _trace_event(uint32_t event);
 void _trace_error(uint32_t event);
+void _trace_event_atomic(uint32_t event);
+void _trace_error_atomic(uint32_t event);
 void trace_off(void);
 void trace_init(struct reef * reef);

@@ -102,8 +104,11 @@ void trace_init(struct reef * reef);

 #define trace_event(__c, __e) \
 	_trace_event(__c | (__e[0] << 16) | (__e[1] <<8) | __e[2])
+#define trace_event_atomic(__c, __e) \
+	_trace_event_atomic(__c | (__e[0] << 16) | (__e[1] <<8) | __e[2])

 #define trace_value(x)	_trace_event(x)
+#define trace_value_atomic(x)	_trace_event_atomic(x)

 #define trace_point(x)	platform_trace_point(x)

@@ -111,17 +116,24 @@ void trace_init(struct reef * reef);
 #if TRACEV
 #define tracev_event(__c, __e)	trace_event(__c, __e)
 #define tracev_value(x)	_trace_event(x)
+#define tracev_event_atomic(__c, __e)	trace_event_atomic(__c, __e)
+#define tracev_value_atomic(x)	_trace_event_atomic(x)
 #else
 #define tracev_event(__c, __e)
 #define tracev_value(x)
+#define tracev_event_atomic(__c, __e)
+#define tracev_value_atomic(x)
 #endif

 /* error tracing */
 #if TRACEE
 #define trace_error(__c, __e) \
 	_trace_error(__c | (__e[0] << 16) | (__e[1] <<8) | __e[2])
+#define trace_error_atomic(__c, __e) \
+	_trace_error_atomic(__c | (__e[0] << 16) | (__e[1] <<8) | __e[2])
 #else
 #define trace_error(__c, __e)
+#define trace_error_atomic(__c, __e)
 #endif
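Call sites pick the variant by context: the non-atomic forms take the trace spinlock internally, so only the _atomic() forms are safe once a lock is already held or IRQs are disabled. Hypothetical call sites for illustration (the three-character codes are made up):

	/* thread context: may lock and reschedule the trace work */
	trace_error(TRACE_CLASS_BUFFER, "exy");

	/* IRQ handler, or while a spinlock is held: no locking inside */
	trace_error_atomic(TRACE_CLASS_BUFFER, "exy");
	trace_event_atomic(TRACE_CLASS_BUFFER, "evt");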
 #else
diff --git a/src/ipc/dma-copy.c b/src/ipc/dma-copy.c
index 75dd3bd..5c9ff97 100644
--- a/src/ipc/dma-copy.c
+++ b/src/ipc/dma-copy.c
@@ -75,8 +75,14 @@ static void dma_complete(void *data, uint32_t type, struct dma_sg_elem *next)

 	if (type == DMA_IRQ_TYPE_LLIST)
 		wait_completed(comp);
+
+	next->size = DMA_RELOAD_END;
 }
+/* Copy DSP memory to host memory.
+ * Copies DSP memory to host in PAGE_SIZE or smaller blocks and waits/sleeps
+ * between blocks. Can't be used in IRQ context.
+ */
 int dma_copy_to_host(struct dma_copy *dc, struct dma_sg_config *host_sg,
 	int32_t host_offset, void *local_ptr, int32_t size)
 {
@@ -85,6 +91,7 @@ int dma_copy_to_host(struct dma_copy *dc, struct dma_sg_config *host_sg,
 	struct dma_sg_elem local_sg_elem;
 	int32_t err;
 	int32_t offset = host_offset;
+	int32_t bytes_copied = 0;
 	if (size <= 0)
 		return 0;
@@ -104,7 +111,11 @@ int dma_copy_to_host(struct dma_copy *dc, struct dma_sg_config *host_sg,
 	/* configure local DMA elem */
 	local_sg_elem.dest = host_sg_elem->dest + offset;
 	local_sg_elem.src = (uint32_t)local_ptr;
-	local_sg_elem.size = HOST_PAGE_SIZE - offset;
+	if (size >= HOST_PAGE_SIZE - offset)
+		local_sg_elem.size = HOST_PAGE_SIZE - offset;
+	else
+		local_sg_elem.size = size;
+
 	list_item_prepend(&local_sg_elem.list, &config.elem_list);

 	/* transfer max PAGE size at a time to SG buffer */
@@ -112,8 +123,13 @@ int dma_copy_to_host(struct dma_copy *dc, struct dma_sg_config *host_sg,

 		/* start the DMA */
 		wait_init(&dc->complete);
-		dma_set_config(dc->dmac, dc->chan, &config);
-		dma_start(dc->dmac, dc->chan);
+		err = dma_set_config(dc->dmac, dc->chan, &config);
+		if (err < 0)
+			return err;
+
+		err = dma_start(dc->dmac, dc->chan);
+		if (err < 0)
+			return err;

 		/* wait for DMA to complete */
 		err = wait_for_completion_timeout(&dc->complete);
@@ -127,12 +143,15 @@ int dma_copy_to_host(struct dma_copy *dc, struct dma_sg_config *host_sg,
 		host_offset += local_sg_elem.size;

 		/* next dest host address is in next host elem */
+		host_sg_elem = list_next_item(host_sg_elem, list);
 		local_sg_elem.dest = host_sg_elem->dest;

 		/* local address is continuous */
 		local_sg_elem.src = (uint32_t)local_ptr + local_sg_elem.size;

+		bytes_copied += local_sg_elem.size;
+
 		/* do we have less than 1 PAGE to copy ? */
 		if (size >= HOST_PAGE_SIZE)
 			local_sg_elem.size = HOST_PAGE_SIZE;
@@ -140,10 +159,65 @@ int dma_copy_to_host(struct dma_copy *dc, struct dma_sg_config *host_sg,
 			local_sg_elem.size = size;
 	}
-	/* new host offset in SG buffer */
-	return host_offset;
+	/* bytes copied */
+	return bytes_copied;
+}
+
+/* Copy DSP memory to host memory.
+ * Copies DSP memory to host in a single PAGE_SIZE or smaller block. Does not
+ * wait/sleep and can be used in IRQ context.
+ */
+int dma_copy_to_host_nowait(struct dma_copy *dc, struct dma_sg_config *host_sg,
+	int32_t host_offset, void *local_ptr, int32_t size)
+{
+	struct dma_sg_config config;
+	struct dma_sg_elem *host_sg_elem;
+	struct dma_sg_elem local_sg_elem;
+	int32_t err;
+	int32_t offset = host_offset;
+
+	if (size <= 0)
+		return 0;
+
+	/* find host element with host_offset */
+	host_sg_elem = sg_get_elem_at(host_sg, &offset);
+	if (host_sg_elem == NULL)
+		return -EINVAL;
+
+	/* set up DMA configuration */
+	config.direction = DMA_DIR_LMEM_TO_HMEM;
+	config.src_width = sizeof(uint32_t);
+	config.dest_width = sizeof(uint32_t);
+	config.cyclic = 0;
+	list_init(&config.elem_list);
+
+	/* configure local DMA elem */
+	local_sg_elem.dest = host_sg_elem->dest + offset;
+	local_sg_elem.src = (uint32_t)local_ptr;
+	if (size >= HOST_PAGE_SIZE - offset)
+		local_sg_elem.size = HOST_PAGE_SIZE - offset;
+	else
+		local_sg_elem.size = size;
+
+	list_item_prepend(&local_sg_elem.list, &config.elem_list);
+
+	/* start the DMA */
+	err = dma_set_config(dc->dmac, dc->chan, &config);
+	if (err < 0)
+		return err;
+
+	err = dma_start(dc->dmac, dc->chan);
+	if (err < 0)
+		return err;
+
+	/* bytes copied */
+	return local_sg_elem.size;
 }

+/* Copy host memory to DSP memory.
+ * Copies host memory to DSP in PAGE_SIZE or smaller blocks and waits/sleeps
+ * between blocks. Can't be used in IRQ context.
+ */
 int dma_copy_from_host(struct dma_copy *dc, struct dma_sg_config *host_sg,
 	int32_t host_offset, void *local_ptr, int32_t size)
 {
@@ -152,6 +226,7 @@ int dma_copy_from_host(struct dma_copy *dc, struct dma_sg_config *host_sg,
 	struct dma_sg_elem local_sg_elem;
 	int32_t err;
 	int32_t offset = host_offset;
+	int32_t bytes_copied = 0;
 	if (size <= 0)
 		return 0;
@@ -171,7 +246,10 @@ int dma_copy_from_host(struct dma_copy *dc, struct dma_sg_config *host_sg,
 	/* configure local DMA elem */
 	local_sg_elem.dest = (uint32_t)local_ptr;
 	local_sg_elem.src = host_sg_elem->src + offset;
-	local_sg_elem.size = HOST_PAGE_SIZE - offset;
+	if (size >= HOST_PAGE_SIZE - offset)
+		local_sg_elem.size = HOST_PAGE_SIZE - offset;
+	else
+		local_sg_elem.size = size;
 	list_item_prepend(&local_sg_elem.list, &config.elem_list);

 	/* transfer max PAGE size at a time to SG buffer */
@@ -179,9 +257,14 @@ int dma_copy_from_host(struct dma_copy *dc, struct dma_sg_config *host_sg,

 		/* start the DMA */
 		wait_init(&dc->complete);
-		dma_set_config(dc->dmac, dc->chan, &config);
-		dma_start(dc->dmac, dc->chan);
-
+		err = dma_set_config(dc->dmac, dc->chan, &config);
+		if (err < 0)
+			return err;
+
+		err = dma_start(dc->dmac, dc->chan);
+		if (err < 0)
+			return err;
+
 		/* wait for DMA to complete */
 		err = wait_for_completion_timeout(&dc->complete);
 		if (err < 0) {
@@ -200,6 +283,8 @@ int dma_copy_from_host(struct dma_copy *dc, struct dma_sg_config *host_sg,
 		/* local address is continuous */
 		local_sg_elem.dest = (uint32_t)local_ptr + local_sg_elem.size;

+		bytes_copied += local_sg_elem.size;
+
 		/* do we have less than 1 PAGE to copy ? */
 		if (size >= HOST_PAGE_SIZE)
 			local_sg_elem.size = HOST_PAGE_SIZE;
@@ -207,8 +292,58 @@ int dma_copy_from_host(struct dma_copy *dc, struct dma_sg_config *host_sg,
 			local_sg_elem.size = size;
 	}
-	/* new host offset in SG buffer */
-	return host_offset;
+	/* bytes copied */
+	return bytes_copied;
+}
+
+/* Copy host memory to DSP memory.
+ * Copies host memory to DSP in a single PAGE_SIZE or smaller block. Does not
+ * wait/sleep and can be used in IRQ context.
+ */
+int dma_copy_from_host_nowait(struct dma_copy *dc, struct dma_sg_config *host_sg,
+	int32_t host_offset, void *local_ptr, int32_t size)
+{
+	struct dma_sg_config config;
+	struct dma_sg_elem *host_sg_elem;
+	struct dma_sg_elem local_sg_elem;
+	int32_t err;
+	int32_t offset = host_offset;
+
+	if (size <= 0)
+		return 0;
+
+	/* find host element with host_offset */
+	host_sg_elem = sg_get_elem_at(host_sg, &offset);
+	if (host_sg_elem == NULL)
+		return -EINVAL;
+
+	/* set up DMA configuration */
+	config.direction = DMA_DIR_HMEM_TO_LMEM;
+	config.src_width = sizeof(uint32_t);
+	config.dest_width = sizeof(uint32_t);
+	config.cyclic = 0;
+	list_init(&config.elem_list);
+
+	/* configure local DMA elem */
+	local_sg_elem.dest = (uint32_t)local_ptr;
+	local_sg_elem.src = host_sg_elem->src + offset;
+	if (size >= HOST_PAGE_SIZE - offset)
+		local_sg_elem.size = HOST_PAGE_SIZE - offset;
+	else
+		local_sg_elem.size = size;
+	list_item_prepend(&local_sg_elem.list, &config.elem_list);
+
+	/* start the DMA */
+	err = dma_set_config(dc->dmac, dc->chan, &config);
+	if (err < 0)
+		return err;
+
+	err = dma_start(dc->dmac, dc->chan);
+	if (err < 0)
+		return err;
+
+	/* bytes copied */
+	return local_sg_elem.size;
 }
 int dma_copy_new(struct dma_copy *dc, int dmac)
diff --git a/src/lib/trace.c b/src/lib/trace.c
index 6f707c3..c0abf7c 100644
--- a/src/lib/trace.c
+++ b/src/lib/trace.c
@@ -49,30 +49,66 @@ void _trace_error(uint32_t event)
 {
 	unsigned long flags;
 	volatile uint64_t *t;
+	uint64_t dt[2];
+	uint64_t time;

 	if (!trace.enable)
 		return;

+	time = platform_timer_get(platform_timer);
+
 	/* save event to DMA tracing buffer */
-	_trace_event(event);
+	dt[0] = time;
+	dt[1] = event;
+	dtrace_event((const char*)dt, sizeof(uint64_t) * 2);

 	/* send event by mail box too. */
 	spin_lock_irq(&trace.lock, flags);

 	/* write timestamp and event to trace buffer */
 	t = (volatile uint64_t*)(MAILBOX_TRACE_BASE + trace.pos);
-	t[0] = platform_timer_get(platform_timer);
+	trace.pos += (sizeof(uint64_t) << 1);
+
+	if (trace.pos > MAILBOX_TRACE_SIZE - sizeof(uint64_t) * 2)
+		trace.pos = 0;
+
+	spin_unlock_irq(&trace.lock, flags);
+
+	t[0] = time;
 	t[1] = event;

 	/* writeback trace data */
 	dcache_writeback_region((void*)t, sizeof(uint64_t) * 2);
+}
+
+void _trace_error_atomic(uint32_t event)
+{
+	volatile uint64_t *t;
+	uint64_t dt[2];
+	uint64_t time;
+
+	if (!trace.enable)
+		return;
+
+	time = platform_timer_get(platform_timer);
+
+	/* save event to DMA tracing buffer */
+	dt[0] = time;
+	dt[1] = event;
+	dtrace_event_atomic((const char*)dt, sizeof(uint64_t) * 2);
+
+	/* write timestamp and event to trace buffer */
+	t = (volatile uint64_t*)(MAILBOX_TRACE_BASE + trace.pos);
 	trace.pos += (sizeof(uint64_t) << 1);

 	if (trace.pos > MAILBOX_TRACE_SIZE - sizeof(uint64_t) * 2)
 		trace.pos = 0;

-	spin_unlock_irq(&trace.lock, flags);
+	t[0] = time;
+	t[1] = event;
+
+	/* writeback trace data */
+	dcache_writeback_region((void*)t, sizeof(uint64_t) * 2);
 }

 void _trace_event(uint32_t event)
@@ -87,10 +123,22 @@ void _trace_event(uint32_t event)
 	dtrace_event((const char*)dt, sizeof(uint64_t) * 2);
 }

+void _trace_event_atomic(uint32_t event)
+{
+	uint64_t dt[2];
+
+	if (!trace.enable)
+		return;
+
+	dt[0] = platform_timer_get(platform_timer);
+	dt[1] = event;
+	dtrace_event_atomic((const char*)dt, sizeof(uint64_t) * 2);
+}
+
 void trace_off(void)
 {
 	trace.enable = 0;
-};
+}

 void trace_init(struct reef *reef)
 {
No need for two IPCs to enable DMA trace. Just use one IPC.
Signed-off-by: Liam Girdwood <liam.r.girdwood@linux.intel.com>
---
 src/include/uapi/ipc.h |  3 +--
 src/ipc/intel-ipc.c    | 30 ++++--------------------------
 2 files changed, 5 insertions(+), 28 deletions(-)
diff --git a/src/include/uapi/ipc.h b/src/include/uapi/ipc.h
index 05952ab..281e77a 100644
--- a/src/include/uapi/ipc.h
+++ b/src/include/uapi/ipc.h
@@ -117,8 +117,7 @@
 #define SOF_IPC_STREAM_VORBIS_FREE	SOF_CMD_TYPE(0x011)

 /* trace and debug */
-#define SOF_IPC_TRACE_DMA_INIT		SOF_CMD_TYPE(0x001)
-#define SOF_IPC_TRACE_DMA_PARAMS	SOF_CMD_TYPE(0x002)
+#define SOF_IPC_TRACE_DMA_PARAMS	SOF_CMD_TYPE(0x001)

 /* Get message component id */
 #define SOF_IPC_MESSAGE_ID(x)		(x & 0xffff)
diff --git a/src/ipc/intel-ipc.c b/src/ipc/intel-ipc.c
index 9b47ca9..f6d88ef 100644
--- a/src/ipc/intel-ipc.c
+++ b/src/ipc/intel-ipc.c
@@ -574,12 +574,14 @@ static int ipc_glb_pm_message(uint32_t header)
  * Debug IPC Operations.
  */
-static int ipc_dma_trace_init(uint32_t header)
+static int ipc_dma_trace_config(uint32_t header)
 {
+	struct intel_ipc_data *iipc = ipc_get_drvdata(_ipc);
+	struct sof_ipc_dma_trace_params *params = _ipc->comp_data;
 	struct sof_ipc_reply reply;
 	int err;

-	trace_ipc("Dti");
+	trace_ipc_error("DA1");

 	/* Initialize DMA for Trace*/
 	err = dma_trace_init(&_ipc->dmat);
@@ -588,28 +590,6 @@
 		goto error;
 	}

-	/* write component values to the outbox */
-	reply.hdr.size = sizeof(reply);
-	reply.hdr.cmd = header;
-	reply.error = 0;
-	mailbox_hostbox_write(0, &reply, sizeof(reply));
-	return 0;
-
-error:
-	if (err < 0)
-		trace_ipc_error("eA!");
-	return -EINVAL;
-}
-
-static int ipc_dma_trace_config(uint32_t header)
-{
-	struct intel_ipc_data *iipc = ipc_get_drvdata(_ipc);
-	struct sof_ipc_dma_trace_params *params = _ipc->comp_data;
-	struct sof_ipc_reply reply;
-	int err;
-
-	trace_ipc("DAl");
-
 	/* use DMA to read in compressed page table ringbuffer from host */
 	err = get_page_descriptors(iipc, &params->buffer);
 	if (err < 0) {
@@ -653,8 +633,6 @@ static int ipc_glb_debug_message(uint32_t header)
 	trace_ipc("Idn");

 	switch (cmd) {
-	case iCS(SOF_IPC_TRACE_DMA_INIT):
-		return ipc_dma_trace_init(header);
 	case iCS(SOF_IPC_TRACE_DMA_PARAMS):
 		return ipc_dma_trace_config(header);
 	default:
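On the host driver side (not part of this patch) the change removes one round trip; an illustrative sequence, with ipc_send() standing in for whatever transport the host actually uses:

	/* before: two round trips to start tracing */
	ipc_send(SOF_IPC_TRACE_DMA_INIT, NULL, 0);
	ipc_send(SOF_IPC_TRACE_DMA_PARAMS, &params, sizeof(params));

	/* after: one SOF_IPC_TRACE_DMA_PARAMS message both initialises the
	 * trace DMA and delivers the host buffer page table */
	ipc_send(SOF_IPC_TRACE_DMA_PARAMS, &params, sizeof(params));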
Execution should never return from do_task(). Dump stack in the panic if we do return here.
Signed-off-by: Liam Girdwood <liam.r.girdwood@linux.intel.com>
---
 src/init/init.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/init/init.c b/src/init/init.c
index 491a6ad..9cdd93f 100644
--- a/src/init/init.c
+++ b/src/init/init.c
@@ -84,6 +84,6 @@ int main(int argc, char *argv[])
 	err = do_task(&reef);

 	/* should never get here */
-	panic(PANIC_TASK);
+	panic_dump_stack(PANIC_TASK);
 	return err;
 }
Add protection to make sure we don't overflow the number of sources.
Signed-off-by: Liam Girdwood <liam.r.girdwood@linux.intel.com>
---
 src/audio/mixer.c | 4 ++++
 1 file changed, 4 insertions(+)
diff --git a/src/audio/mixer.c b/src/audio/mixer.c
index cf265a7..41f1d88 100644
--- a/src/audio/mixer.c
+++ b/src/audio/mixer.c
@@ -233,6 +233,10 @@ static int mixer_copy(struct comp_dev *dev)
 		/* only mix the sources with the same state with mixer */
 		if (source->source->state == dev->state)
 			sources[num_mix_sources++] = source;
+
+		/* too many sources ? */
+		if (num_mix_sources == PLATFORM_MAX_STREAMS - 1)
+			return 0;
 	}

 	/* dont have any work if all sources are inactive */
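The same guard pattern in isolation, stopping one slot before the fixed-size array's capacity (the candidates loop here is hypothetical, for illustration only):

	struct comp_buffer *sources[PLATFORM_MAX_STREAMS];
	int num_mix_sources = 0;
	int i;

	for (i = 0; i < num_candidates; i++) {
		sources[num_mix_sources++] = candidates[i];

		/* too many sources ? */
		if (num_mix_sources == PLATFORM_MAX_STREAMS - 1)
			break;
	}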
Allows objects to be inserted at any offset in the debug buffer for analysis.
Signed-off-by: Liam Girdwood <liam.r.girdwood@linux.intel.com>
---
 src/include/reef/debug.h | 14 ++++++++++++++
 1 file changed, 14 insertions(+)
diff --git a/src/include/reef/debug.h b/src/include/reef/debug.h
index 2bee8e6..ef64419 100644
--- a/src/include/reef/debug.h
+++ b/src/include/reef/debug.h
@@ -98,6 +98,16 @@
 		*(__m++) = *(__a++); \
 	} while (0);

+/* dump data area at addr and size count at mailbox offset or shared memory */
+#define dump_at(addr, count, offset) \
+	do { \
+		volatile uint32_t *__m = (uint32_t*)mailbox_get_debug_base() + offset; \
+		volatile uint32_t *__a = (uint32_t*)addr; \
+		volatile int __c = count; \
+		while (__c--) \
+			*(__m++) = *(__a++); \
+	} while (0);
+
 /* dump object to start of mailbox */
 #define dump_object(__o) \
 	dbg(); \
 	dump(__o, sizeof(*(__o)) >> 2);
@@ -108,6 +118,10 @@
 	dbg(); \
 	dump(__o, sizeof(*(__o)) >> 2);

+#define dump_object_ptr_at(__o, __at) \
+	dbg(); \
+	dump_at(__o, sizeof(*(__o)) >> 2, __at);
+
 #else
#define dbg()
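Hypothetical usage: place a second object at a 32-bit word offset so it does not overwrite one already dumped at the start of the debug mailbox (the two pointers below are stand-ins; note the offset is in uint32_t words, per the macro's pointer arithmetic):

	/* first object at the start of the debug mailbox */
	dump_object(trace_data);

	/* second object 64 words (256 bytes) further in, preserving the
	 * first for side-by-side analysis */
	dump_object_ptr_at(ipc_data, 64);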
Add a simple trace method to output lock users and detect any sleeping in atomic context. The intention is to provide simple trace data that can be used to pinpoint any deadlocks or attempts to sleep whilst atomic.

Future: the trace output in this patch can be improved to provide an easier lookup of lock users than raw line numbers.
Signed-off-by: Liam Girdwood <liam.r.girdwood@linux.intel.com>
---
 src/arch/xtensa/include/arch/spinlock.h |  3 ++
 src/arch/xtensa/init.c                  |  6 ++++
 src/audio/pipeline.c                    |  1 +
 src/include/reef/lock.h                 | 61 +++++++++++++++++++++++++++----
 src/include/reef/wait.h                 | 11 ++++++
 5 files changed, 75 insertions(+), 7 deletions(-)
diff --git a/src/arch/xtensa/include/arch/spinlock.h b/src/arch/xtensa/include/arch/spinlock.h
index 639d7a4..acb1b8e 100644
--- a/src/arch/xtensa/include/arch/spinlock.h
+++ b/src/arch/xtensa/include/arch/spinlock.h
@@ -37,6 +37,9 @@

 typedef struct {
 	volatile uint32_t lock;
+#if DEBUG_LOCKS
+	uint32_t user;
+#endif
 } spinlock_t;

 static inline void arch_spinlock_init(spinlock_t *lock)
diff --git a/src/arch/xtensa/init.c b/src/arch/xtensa/init.c
index 1e52d99..44f7faf 100644
--- a/src/arch/xtensa/init.c
+++ b/src/arch/xtensa/init.c
@@ -37,8 +37,14 @@
 #include <arch/task.h>
 #include <reef/debug.h>
 #include <reef/init.h>
+#include <reef/lock.h>
 #include <stdint.h>

+#if DEBUG_LOCKS
+uint32_t lock_dbg_atomic = 0;
+uint32_t lock_dbg_user[DBG_LOCK_USERS] = {0};
+#endif
+
 /* TODO: this should be fixed by rotating the register Window on the stack and
  * dumping the saved registers.
  * TODO: we should also have different handlers for each type where we can take
diff --git a/src/audio/pipeline.c b/src/audio/pipeline.c
index 636a42b..0ef971e 100644
--- a/src/audio/pipeline.c
+++ b/src/audio/pipeline.c
@@ -39,6 +39,7 @@
 #include <reef/alloc.h>
 #include <reef/debug.h>
 #include <reef/ipc.h>
+#include <reef/lock.h>
 #include <platform/timer.h>
 #include <platform/platform.h>
 #include <reef/audio/component.h>
diff --git a/src/include/reef/lock.h b/src/include/reef/lock.h
index 234dff2..8b05ef2 100644
--- a/src/include/reef/lock.h
+++ b/src/include/reef/lock.h
@@ -34,24 +34,71 @@
 #ifndef __INCLUDE_LOCK__
 #define __INCLUDE_LOCK__
+#define DEBUG_LOCKS	0
+
 #include <stdint.h>
 #include <arch/spinlock.h>
 #include <reef/interrupt.h>
 #include <reef/trace.h>

-#define DEBUG_LOCKS	0

 #if DEBUG_LOCKS

-#define trace_lock(__e)	trace_event(TRACE_CLASS_LOCK, __e)
-#define tracev_lock(__e)	tracev_event(TRACE_CLASS_LOCK, __e)
+#define DBG_LOCK_USERS		8
+
+#define trace_lock(__e)		trace_event_atomic(TRACE_CLASS_LOCK, __e)
+#define tracev_lock(__e)	tracev_event_atomic(TRACE_CLASS_LOCK, __e)
+#define trace_lock_error(__e)	trace_error_atomic(TRACE_CLASS_LOCK, __e)
+#define trace_lock_value(__e)	_trace_error_atomic(__e)
+
+extern uint32_t lock_dbg_atomic;
+extern uint32_t lock_dbg_user[DBG_LOCK_USERS];

 #define spin_lock_dbg() \
-	trace_lock("LcE"); \
-	trace_value(__LINE__);
+	trace_lock("LcE");
+
 #define spin_unlock_dbg() \
 	trace_lock("LcX");

+/* all SMP spinlocks need init, nothing todo on UP */
+#define spinlock_init(lock) \
+	arch_spinlock_init(lock); \
+	(lock)->user = __LINE__;
+
+/* does nothing on UP systems */
+#define spin_lock(lock) \
+	spin_lock_dbg(); \
+	if (lock_dbg_atomic) { \
+		int __i = 0; \
+		int __count = lock_dbg_atomic >= DBG_LOCK_USERS \
+			? DBG_LOCK_USERS : lock_dbg_atomic; \
+		trace_lock_error("eal"); \
+		trace_lock_value(__LINE__); \
+		trace_lock_value(lock_dbg_atomic); \
+		for (__i = 0; __i < __count; __i++) { \
+			trace_lock_value((lock_dbg_atomic << 24) | \
+				lock_dbg_user[__i]); \
+		} \
+	} \
+	arch_spin_lock(lock);
+
+#define spin_unlock(lock) \
+	arch_spin_unlock(lock); \
+	spin_unlock_dbg();
+
+/* disables all IRQ sources and takes lock - enter atomic context */
+#define spin_lock_irq(lock, flags) \
+	flags = interrupt_global_disable(); \
+	spin_lock(lock); \
+	if (++lock_dbg_atomic < DBG_LOCK_USERS) \
+		lock_dbg_user[lock_dbg_atomic - 1] = (lock)->user;
+
+/* re-enables current IRQ sources and releases lock - leave atomic context */
+#define spin_unlock_irq(lock, flags) \
+	spin_unlock(lock); \
+	lock_dbg_atomic--; \
+	interrupt_global_enable(flags);
+
 #else
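Each value emitted by the for loop above packs the current atomic depth into the top byte and the __LINE__ recorded at spinlock_init() time into the low bits. Decoding a captured trace word is then just (sketch; 'word' is a value read back from the trace):

	uint32_t depth = word >> 24;		/* lock_dbg_atomic at lock time */
	uint32_t init_line = word & 0xffffff;	/* __LINE__ of spinlock_init() */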
 #define trace_lock(__e)
@@ -60,8 +107,6 @@
 #define spin_lock_dbg()
 #define spin_unlock_dbg()

-#endif
-
 /* all SMP spinlocks need init, nothing todo on UP */
 #define spinlock_init(lock) \
 	arch_spinlock_init(lock)
@@ -86,3 +131,5 @@
 	interrupt_global_enable(flags);

 #endif
+
+#endif
diff --git a/src/include/reef/wait.h b/src/include/reef/wait.h
index 08fc0b0..c5b370e 100644
--- a/src/include/reef/wait.h
+++ b/src/include/reef/wait.h
@@ -40,8 +40,18 @@
 #include <reef/timer.h>
 #include <reef/interrupt.h>
 #include <reef/trace.h>
+#include <reef/lock.h>
 #include <platform/interrupt.h>

+#if DEBUG_LOCKS
+#define wait_atomic_check \
+	if (lock_dbg_atomic) { \
+		trace_error_atomic(TRACE_CLASS_WAIT, "atm"); \
+	}
+#else
+#define wait_atomic_check
+#endif
+
 typedef struct {
 	uint32_t complete;
 	struct work work;
@@ -53,6 +63,7 @@ void arch_wait_for_interrupt(int level);

 static inline void wait_for_interrupt(int level)
 {
 	tracev_event(TRACE_CLASS_WAIT, "WFE");
+	wait_atomic_check;
 	arch_wait_for_interrupt(level);
 	tracev_event(TRACE_CLASS_WAIT, "WFX");
 }
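With DEBUG_LOCKS enabled, a sleep-while-atomic bug such as the one below is flagged with an "atm" error once execution reaches wait_for_interrupt() (illustrative misuse, assuming the completion wait ends up in wait_for_interrupt() as in dma-copy.c):

	spin_lock_irq(&d->lock, flags);		/* lock_dbg_atomic++ */

	/* BUG: waits (and so may sleep) while atomic - wait_atomic_check
	 * traces "atm" before arch_wait_for_interrupt() is entered */
	wait_for_completion_timeout(&d->dc.complete);

	spin_unlock_irq(&d->lock, flags);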