[Sound-open-firmware] [PATCH 1/8] core: dma: Add DMA copy API.
From: Liam Girdwood liam.r.girdwood@linux.intel.com
This API allows clients to manually inform the DMAC when they need more data copied. The DMAC can then copy the desired amount of data.
Signed-off-by: Liam Girdwood liam.r.girdwood@linux.intel.com
---
 src/include/reef/dma.h | 6 ++++++
 1 file changed, 6 insertions(+)
diff --git a/src/include/reef/dma.h b/src/include/reef/dma.h
index 77f8f71..80ff8d4 100644
--- a/src/include/reef/dma.h
+++ b/src/include/reef/dma.h
@@ -94,6 +94,7 @@ struct dma_ops {
 	int (*start)(struct dma *dma, int channel);
 	int (*stop)(struct dma *dma, int channel);
+	int (*copy)(struct dma *dma, int channel, int bytes);
 	int (*pause)(struct dma *dma, int channel);
 	int (*release)(struct dma *dma, int channel);
 	int (*status)(struct dma *dma, int channel,
 		struct dma_chan_status *status, uint8_t direction);
@@ -183,6 +184,11 @@ static inline int dma_stop(struct dma *dma, int channel)
 	return dma->ops->stop(dma, channel);
 }
 
+static inline int dma_copy(struct dma *dma, int channel, int bytes)
+{
+	return dma->ops->copy(dma, channel, bytes);
+}
+
 static inline int dma_pause(struct dma *dma, int channel)
 {
 	return dma->ops->pause(dma, channel);
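For reference, a minimal sketch of how a client could drive the new call once it owns a DMAC and a channel. The struct and field names below are illustrative; only dma_copy() itself comes from this patch:

#include <stdint.h>
#include <reef/dma.h>

/* illustrative client state - not part of the patch above */
struct copy_client {
	struct dma *dma;	/* DMAC obtained earlier via dma_get() */
	int chan;		/* channel obtained via dma_channel_get() */
	uint32_t period_bytes;	/* amount of data to move per request */
};

/* ask the DMAC to copy one more period once the client has data/space ready */
static int copy_client_kick(struct copy_client *c)
{
	return dma_copy(c->dma, c->chan, (int)c->period_bytes);
}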
From: Liam Girdwood liam.r.girdwood@linux.intel.com
Some DMACs have an array of register elements used to configure and control DMA channels. Provide a mapping here so that FW can easily calculate the correct channel register offset.
Signed-off-by: Liam Girdwood liam.r.girdwood@linux.intel.com
---
 src/include/reef/dma.h | 5 +++++
 1 file changed, 5 insertions(+)
diff --git a/src/include/reef/dma.h b/src/include/reef/dma.h
index 80ff8d4..13af20c 100644
--- a/src/include/reef/dma.h
+++ b/src/include/reef/dma.h
@@ -119,6 +119,7 @@ struct dma_plat_data {
 	uint32_t base;
 	uint32_t channels;
 	uint32_t irq;
+	uint32_t chan_size;
 	void *drv_plat_data;
 };
 
@@ -145,6 +146,10 @@ struct dma *dma_get(int dmac_id);
 	dma->plat_data.base
 #define dma_irq(dma) \
 	dma->plat_data.irq
+#define dma_chan_size(dma) \
+	dma->plat_data.chan_size
+#define dma_chan_base(dma, chan) \
+	(dma->plat_data.base + chan * dma->plat_data.chan_size)
 
 /* DMA API
  * Programming flow is :-
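As an aside, a driver built on these macros addresses per-channel registers roughly as below; the 0x08 offset is made up for illustration, the real register map arrives with the HDA driver later in this series:

#include <stdint.h>
#include <reef/dma.h>
#include <reef/io.h>

/* sketch: read a register at a (hypothetical) 0x08 offset of a given channel;
 * dma_chan_base() expands to base + chan * chan_size as added above */
static uint32_t chan_reg_read(struct dma *dma, int chan)
{
	return io_reg_read(dma_chan_base(dma, chan) + 0x08);
}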
From: Liam Girdwood liam.r.girdwood@linux.intel.com
Allow clients to request a particular DMA channel if it is supported by the DMA driver and available. If the requested channel is not supported by the DMAC then another channel is returned. If the requested channel is not free then an error is returned.
Signed-off-by: Liam Girdwood liam.r.girdwood@linux.intel.com
Signed-off-by: Keyon Jie yang.jie@linux.intel.com
---
 src/audio/dai.c        | 2 +-
 src/audio/host.c       | 2 +-
 src/drivers/dw-dma.c   | 2 +-
 src/include/reef/dma.h | 6 +++---
 src/ipc/dma-copy.c     | 4 ++--
 src/ipc/intel-ipc.c    | 2 +-
 6 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/src/audio/dai.c b/src/audio/dai.c index ff2ff93..66edc2b 100644 --- a/src/audio/dai.c +++ b/src/audio/dai.c @@ -219,7 +219,7 @@ static struct comp_dev *dai_new(struct sof_ipc_comp *comp) dd->xrun = 0;
/* get DMA channel from DMAC1 */ - dd->chan = dma_channel_get(dd->dma); + dd->chan = dma_channel_get(dd->dma, 0); if (dd->chan < 0){ trace_dai_error("eDc"); goto error; diff --git a/src/audio/host.c b/src/audio/host.c index b3c3c83..0d0a7bb 100644 --- a/src/audio/host.c +++ b/src/audio/host.c @@ -272,7 +272,7 @@ static struct comp_dev *host_new(struct sof_ipc_comp *comp) list_item_prepend(&elem->list, &hd->config.elem_list);
/* get DMA channel from DMAC */ - hd->chan = dma_channel_get(hd->dma); + hd->chan = dma_channel_get(hd->dma, 0); if (hd->chan < 0) { trace_host_error("eDC"); goto error; diff --git a/src/drivers/dw-dma.c b/src/drivers/dw-dma.c index 7a1805e..cbfed0e 100644 --- a/src/drivers/dw-dma.c +++ b/src/drivers/dw-dma.c @@ -286,7 +286,7 @@ static inline void dw_update_bits(struct dma *dma, uint32_t reg, uint32_t mask, }
/* allocate next free DMA channel */ -static int dw_dma_channel_get(struct dma *dma) +static int dw_dma_channel_get(struct dma *dma, int req_chan) { struct dma_pdata *p = dma_get_drvdata(dma); uint32_t flags; diff --git a/src/include/reef/dma.h b/src/include/reef/dma.h index 13af20c..aab9217 100644 --- a/src/include/reef/dma.h +++ b/src/include/reef/dma.h @@ -89,7 +89,7 @@ struct dma_chan_status { /* DMA operations */ struct dma_ops {
- int (*channel_get)(struct dma *dma); + int (*channel_get)(struct dma *dma, int req_channel); void (*channel_put)(struct dma *dma, int channel);
int (*start)(struct dma *dma, int channel); @@ -163,9 +163,9 @@ struct dma *dma_get(int dmac_id); * 6) dma_channel_put() */
-static inline int dma_channel_get(struct dma *dma) +static inline int dma_channel_get(struct dma *dma, int req_channel) { - return dma->ops->channel_get(dma); + return dma->ops->channel_get(dma, req_channel); }
static inline void dma_channel_put(struct dma *dma, int channel) diff --git a/src/ipc/dma-copy.c b/src/ipc/dma-copy.c index 57d3e3d..06f05d5 100644 --- a/src/ipc/dma-copy.c +++ b/src/ipc/dma-copy.c @@ -359,8 +359,8 @@ int dma_copy_new(struct dma_copy *dc, int dmac) return -ENODEV; }
- /* get DMA channel from DMAC0 */ - dc->chan = dma_channel_get(dc->dmac); + /* get DMA channel from DMAC0 */ + dc->chan = dma_channel_get(dc->dmac, 0); if (dc->chan < 0) { trace_dma_error("ec1"); return dc->chan; diff --git a/src/ipc/intel-ipc.c b/src/ipc/intel-ipc.c index 54a0812..fac3371 100644 --- a/src/ipc/intel-ipc.c +++ b/src/ipc/intel-ipc.c @@ -104,7 +104,7 @@ static int get_page_descriptors(struct intel_ipc_data *iipc, int ret = 0;
/* get DMA channel from DMAC0 */ - chan = dma_channel_get(iipc->dmac0); + chan = dma_channel_get(iipc->dmac0, 0); if (chan < 0) { trace_ipc_error("ePC"); return chan;
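The calling convention after this patch, sketched below: existing callers with no preference pass 0, while callers tied to a specific hardware stream pass that stream's index and must handle a negative return if the channel is taken. Both helpers are illustrative wrappers, not part of the patch:

#include <reef/dma.h>

/* sketch: caller with no channel preference keeps the old behaviour */
static int get_any_channel(struct dma *dma)
{
	return dma_channel_get(dma, 0);
}

/* sketch: caller bound to a hardware stream asks for that exact channel;
 * a negative value means the requested channel was not free */
static int get_stream_channel(struct dma *dma, int stream_index)
{
	return dma_channel_get(dma, stream_index);
}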
From: Liam Girdwood liam.r.girdwood@linux.intel.com
Let clients know whether they can expect a callback from the DMA driver.
Signed-off-by: Liam Girdwood liam.r.girdwood@linux.intel.com
Signed-off-by: Keyon Jie yang.jie@linux.intel.com
---
 src/drivers/dw-dma.c   | 4 +++-
 src/include/reef/dma.h | 6 +++---
 2 files changed, 6 insertions(+), 4 deletions(-)
diff --git a/src/drivers/dw-dma.c b/src/drivers/dw-dma.c
index cbfed0e..c566509 100644
--- a/src/drivers/dw-dma.c
+++ b/src/drivers/dw-dma.c
@@ -729,7 +729,7 @@ static int dw_dma_pm_context_store(struct dma *dma)
 	return 0;
 }
 
-static void dw_dma_set_cb(struct dma *dma, int channel, int type,
+static int dw_dma_set_cb(struct dma *dma, int channel, int type,
 	void (*cb)(void *data, uint32_t type, struct dma_sg_elem *next),
 	void *data)
 {
@@ -741,6 +741,8 @@ static void dw_dma_set_cb(struct dma *dma, int channel, int type,
 	p->chan[channel].cb_data = data;
 	p->chan[channel].cb_type = type;
 	spin_unlock_irq(&dma->lock, flags);
+
+	return 0;
 }
 
 /* reload using LLI data */
diff --git a/src/include/reef/dma.h b/src/include/reef/dma.h
index aab9217..71bce56 100644
--- a/src/include/reef/dma.h
+++ b/src/include/reef/dma.h
@@ -103,7 +103,7 @@ struct dma_ops {
 	int (*set_config)(struct dma *dma, int channel,
 		struct dma_sg_config *config);
 
-	void (*set_cb)(struct dma *dma, int channel, int type,
+	int (*set_cb)(struct dma *dma, int channel, int type,
 		void (*cb)(void *data, uint32_t type, struct dma_sg_elem *next),
 		void *data);
 
@@ -173,10 +173,10 @@ static inline void dma_channel_put(struct dma *dma, int channel)
 	dma->ops->channel_put(dma, channel);
 }
 
-static inline void dma_set_cb(struct dma *dma, int channel, int type,
+static inline int dma_set_cb(struct dma *dma, int channel, int type,
 	void (*cb)(void *data, uint32_t type, struct dma_sg_elem *next),
 	void *data)
 {
-	dma->ops->set_cb(dma, channel, type, cb, data);
+	return dma->ops->set_cb(dma, channel, type, cb, data);
 }
 
 static inline int dma_start(struct dma *dma, int channel)
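With the return value in place, a caller can detect drivers that will never deliver completion callbacks (the HDA gateway driver added later in this series returns an error here) and fall back to driving copies itself. A sketch, with an illustrative need_manual_copy flag:

#include <stdint.h>
#include <reef/dma.h>

static void period_cb(void *data, uint32_t type, struct dma_sg_elem *next)
{
	/* handle period / LLI completion here */
}

/* sketch: register a callback and remember whether we must copy manually */
static int setup_completion(struct dma *dma, int chan, void *data,
	int *need_manual_copy)
{
	int ret;

	ret = dma_set_cb(dma, chan, DMA_IRQ_TYPE_LLIST, period_cb, data);
	if (ret < 0) {
		/* no callback support - the client drives dma_copy() itself */
		*need_manual_copy = 1;
		return 0;
	}

	*need_manual_copy = 0;
	return 0;
}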
From: Liam Girdwood liam.r.girdwood@linux.intel.com
Add the HDA driver to the DMA platform data and clean up alignment.
Signed-off-by: Liam Girdwood liam.r.girdwood@linux.intel.com
---
 src/platform/apollolake/dma.c | 21 +++++++++++++++------
 1 file changed, 15 insertions(+), 6 deletions(-)
diff --git a/src/platform/apollolake/dma.c b/src/platform/apollolake/dma.c index d142fa0..4a6c0ec 100644 --- a/src/platform/apollolake/dma.c +++ b/src/platform/apollolake/dma.c @@ -31,6 +31,7 @@
#include <reef/dma.h> #include <reef/dw-dma.h> +#include <reef/hda-dma.h> #include <platform/memory.h> #include <platform/interrupt.h> #include <platform/dma.h> @@ -113,7 +114,7 @@ static struct dma dma[] = { .id = DMA_GP_LP_DMAC0, .base = LP_GP_DMA_BASE(0), .channels = 8, - .irq = IRQ_EXT_LP_GPDMA0_LVL5(0, 0), + .irq = IRQ_EXT_LP_GPDMA0_LVL5(0, 0), .drv_plat_data = &dmac0, }, .ops = &dw_dma_ops, @@ -123,7 +124,7 @@ static struct dma dma[] = { .id = DMA_GP_LP_DMAC1, .base = LP_GP_DMA_BASE(1), .channels = 8, - .irq = IRQ_EXT_LP_GPDMA1_LVL5(0, 0), + .irq = IRQ_EXT_LP_GPDMA1_LVL5(0, 0), .drv_plat_data = &dmac1, }, .ops = &dw_dma_ops, @@ -133,32 +134,40 @@ static struct dma dma[] = { .id = DMA_HOST_IN_DMAC, .base = GTW_HOST_IN_STREAM_BASE(0), .channels = 7, - .irq = IRQ_EXT_HOST_DMA_IN_LVL3(0, 0), + .irq = IRQ_EXT_HOST_DMA_IN_LVL3(0, 0), + .chan_size = GTW_HOST_IN_STREAM_SIZE, }, + .ops = &hda_host_dma_ops, }, { /* Host out DMAC */ .plat_data = { .id = DMA_HOST_OUT_DMAC, .base = GTW_HOST_OUT_STREAM_BASE(0), .channels = 6, - .irq = IRQ_EXT_HOST_DMA_OUT_LVL3(0, 0), + .irq = IRQ_EXT_HOST_DMA_OUT_LVL3(0, 0), + .chan_size = GTW_HOST_OUT_STREAM_SIZE, }, + .ops = &hda_host_dma_ops, }, { /* Link In DMAC */ .plat_data = { .id = DMA_LINK_IN_DMAC, .base = GTW_LINK_IN_STREAM_BASE(0), .channels = 8, - .irq = IRQ_EXT_LINK_DMA_IN_LVL4(0, 0), + .irq = IRQ_EXT_LINK_DMA_IN_LVL4(0, 0), + .chan_size = GTW_LINK_IN_STREAM_SIZE, }, + .ops = &hda_link_dma_ops, }, { /* Link out DMAC */ .plat_data = { .id = DMA_LINK_OUT_DMAC, .base = GTW_LINK_OUT_STREAM_BASE(0), .channels = 8, - .irq = IRQ_EXT_LINK_DMA_OUT_LVL4(0, 0), + .irq = IRQ_EXT_LINK_DMA_OUT_LVL4(0, 0), + .chan_size = GTW_LINK_OUT_STREAM_SIZE, }, + .ops = &hda_link_dma_ops, },};
struct dma *dma_get(int dmac_id)
Add host and link DMA drivers for the Intel HDA DMA gateway.
Signed-off-by: Liam Girdwood liam.r.girdwood@linux.intel.com
Signed-off-by: Keyon Jie yang.jie@linux.intel.com
---
 configure.ac                       |   2 +
 src/drivers/Makefile.am            |  10 +
 src/drivers/hda-dma.c              | 437 +++++++++++++++++++++++++++++++++++++
 src/platform/apollolake/platform.c |  25 ++-
 4 files changed, 466 insertions(+), 8 deletions(-)
 create mode 100644 src/drivers/hda-dma.c
diff --git a/configure.ac b/configure.ac index 05a3bee..ab2e845 100644 --- a/configure.ac +++ b/configure.ac @@ -142,6 +142,7 @@ case "$with_platform" in
AC_DEFINE([CONFIG_APOLLOLAKE], [1], [Configure for Apololake]) AC_DEFINE([CONFIG_IRQ_MAP], [1], [Configure IRQ maps]) + AC_DEFINE([CONFIG_DMA_GW], [1], [Configure DMA Gateway]) ;; haswell*)
@@ -193,6 +194,7 @@ case "$with_platform" in
AC_DEFINE([CONFIG_CANNONLAKE], [1], [Configure for Cannonlake]) AC_DEFINE([CONFIG_IRQ_MAP], [1], [Configure IRQ maps]) + AC_DEFINE([CONFIG_DMA_GW], [1], [Configure DMA Gateway]) ;; *) if test "$ARCH" = "host"; then diff --git a/src/drivers/Makefile.am b/src/drivers/Makefile.am index 8c507fa..0e8d84e 100644 --- a/src/drivers/Makefile.am +++ b/src/drivers/Makefile.am @@ -4,6 +4,16 @@ libdrivers_a_SOURCES = \ ssp.c \ dw-dma.c
+if BUILD_APOLLOLAKE +libdrivers_a_SOURCES += \ + hda-dma.c +endif + +if BUILD_CANNONLAKE +libdrivers_a_SOURCES += \ + hda-dma.c +endif + libdrivers_a_CFLAGS = \ $(ARCH_CFLAGS) \ $(REEF_INCDIR) \ diff --git a/src/drivers/hda-dma.c b/src/drivers/hda-dma.c new file mode 100644 index 0000000..a143528 --- /dev/null +++ b/src/drivers/hda-dma.c @@ -0,0 +1,437 @@ +/* + * Copyright (c) 2018, Intel Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Intel Corporation nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ * + * Author: Keyon Jie yang.jie@linux.intel.com + * Liam Girdwood liam.r.girdwood@linux.intel.com + */ + +#include <stdint.h> +#include <stddef.h> +#include <errno.h> +#include <reef/reef.h> +#include <reef/lock.h> +#include <reef/list.h> +#include <reef/stream.h> +#include <reef/alloc.h> +#include <reef/trace.h> +#include <reef/dma.h> +#include <reef/io.h> +#include <reef/ipc.h> +#include <reef/wait.h> +#include <platform/dma.h> +#include <arch/cache.h> +#include <uapi/ipc.h> + +#define trace_host(__e) trace_event(TRACE_CLASS_HOST, __e) +#define tracev_host(__e) tracev_event(TRACE_CLASS_HOST, __e) +#define trace_host_error(__e) trace_error(TRACE_CLASS_HOST, __e) + +/* Gateway Stream Registers */ +#define DGCS 0x00 +#define DGBBA 0x04 +#define DGBS 0x08 +#define DGBFPI 0x0c /* firmware need to update this when DGCS.FWCB=1 */ +#define DGBRP 0x10 /* Read Only, read pointer */ +#define DGBWP 0x14 /* Read Only, write pointer */ +#define DGBSP 0x18 +#define DGMBS 0x1c +#define DGLLPI 0x24 +#define DGLPIBI 0x28 + +/* DGCS */ +#define DGCS_SCS (1 << 31) +#define DGCS_GEN (1 << 26) +#define DGCS_FWCB (1 << 23) +#define DGCS_BSC (1 << 11) +#define DGCS_BF (1 << 9) /* buffer full */ +#define DGCS_BNE (1 << 8) /* buffer not empty */ +#define DGCS_FIFORDY (1 << 5) /* enable FIFO */ + +/* DGBBA */ +#define DGBBA_MASK 0xffff80 + +/* DGBS */ +#define DGBS_MASK 0xfffff0 + +#define HDA_DMA_MAX_CHANS 8 + +struct hda_chan_data { + uint32_t stream_id; + uint32_t status; + uint32_t desc_count; + uint32_t desc_avail; + uint32_t direction; +}; + +struct dma_pdata { + struct dma *dma; + int32_t num_channels; + struct hda_chan_data chan[HDA_DMA_MAX_CHANS]; +}; + +static inline uint32_t host_dma_reg_read(struct dma *dma, uint32_t chan, + uint32_t reg) +{ + return io_reg_read(dma_chan_base(dma, chan) + reg); +} + +static inline void host_dma_reg_write(struct dma *dma, uint32_t chan, + uint32_t reg, uint32_t value) +{ + io_reg_write(dma_chan_base(dma, chan) + reg, value); +} + +static inline void hda_update_bits(struct dma *dma, uint32_t chan, + uint32_t reg, uint32_t mask, uint32_t value) +{ + io_reg_update_bits(dma_chan_base(dma, chan) + reg, mask, value); +} + +/* notify DMA to copy bytes */ +static int hda_dma_copy(struct dma *dma, int channel, int bytes) +{ + trace_host("GwU"); + + /* reset BSC before start next copy */ + hda_update_bits(dma, channel, DGCS, DGCS_BSC, DGCS_BSC); + + /* + * set BFPI to let host gateway knows we have read size, + * which will trigger next copy start. 
+ */ + host_dma_reg_write(dma, channel, DGBFPI, bytes); + + host_dma_reg_write(dma, channel, DGLLPI, bytes); + host_dma_reg_write(dma, channel, DGLPIBI, bytes); + + return 0; +} + +/* acquire the specific DMA channel */ +static int hda_dma_channel_get(struct dma *dma, int channel) +{ + struct dma_pdata *p = dma_get_drvdata(dma); + uint32_t flags; + + spin_lock_irq(&dma->lock, flags); + + trace_host("Dgt"); + + /* use channel if it's free */ + if (p->chan[channel].status == COMP_STATE_INIT) { + p->chan[channel].status = COMP_STATE_READY; + + /* return channel */ + spin_unlock_irq(&dma->lock, flags); + return channel; + } + + /* DMAC has no free channels */ + spin_unlock_irq(&dma->lock, flags); + trace_host_error("eG0"); + return -ENODEV; +} + +/* channel must not be running when this is called */ +static void hda_dma_channel_put_unlocked(struct dma *dma, int channel) +{ + struct dma_pdata *p = dma_get_drvdata(dma); + + /* set new state */ + p->chan[channel].status = COMP_STATE_INIT; +} + +/* channel must not be running when this is called */ +static void hda_dma_channel_put(struct dma *dma, int channel) +{ + uint32_t flags; + + spin_lock_irq(&dma->lock, flags); + hda_dma_channel_put_unlocked(dma, channel); + spin_unlock_irq(&dma->lock, flags); +} + +static int hda_dma_start(struct dma *dma, int channel) +{ + struct dma_pdata *p = dma_get_drvdata(dma); + uint32_t flags; + uint32_t dgcs; + int ret = 0; + + spin_lock_irq(&dma->lock, flags); + + trace_host("DEn"); + + /* is channel idle, disabled and ready ? */ + dgcs = host_dma_reg_read(dma, channel, DGCS); + if (p->chan[channel].status != COMP_STATE_PREPARE || + (dgcs & DGCS_GEN)) { + ret = -EBUSY; + trace_host_error("eS0"); + trace_value(dgcs); + trace_value(p->chan[channel].status); + goto out; + } + + /* enable the channel */ + hda_update_bits(dma, channel, DGCS, DGCS_GEN | DGCS_FIFORDY, + DGCS_GEN | DGCS_FIFORDY); + + /* full buffer is copied at startup */ + p->chan[channel].desc_avail = p->chan[channel].desc_count; +out: + spin_unlock_irq(&dma->lock, flags); + return ret; +} + +static int hda_dma_release(struct dma *dma, int channel) +{ + struct dma_pdata *p = dma_get_drvdata(dma); + uint32_t flags; + + spin_lock_irq(&dma->lock, flags); + + trace_host("Dpr"); + + /* resume and reload DMA */ + p->chan[channel].status = COMP_STATE_ACTIVE; + + spin_unlock_irq(&dma->lock, flags); + return 0; +} + +static int hda_dma_pause(struct dma *dma, int channel) +{ + struct dma_pdata *p = dma_get_drvdata(dma); + uint32_t flags; + + spin_lock_irq(&dma->lock, flags); + + trace_host("Dpa"); + + if (p->chan[channel].status != COMP_STATE_ACTIVE) + goto out; + + /* pause the channel */ + p->chan[channel].status = COMP_STATE_PAUSED; + +out: + spin_unlock_irq(&dma->lock, flags); + return 0; +} + +static int hda_dma_stop(struct dma *dma, int channel) +{ + struct dma_pdata *p = dma_get_drvdata(dma); + uint32_t flags; + + spin_lock_irq(&dma->lock, flags); + + trace_host("DDi"); + + /* disable the channel */ + hda_update_bits(dma, channel, DGCS, DGCS_GEN | DGCS_FIFORDY, 0); + p->chan[channel].status = COMP_STATE_PREPARE; + + spin_unlock_irq(&dma->lock, flags); + return 0; +} + +/* fill in "status" with current DMA channel state and position */ +static int hda_dma_status(struct dma *dma, int channel, + struct dma_chan_status *status, uint8_t direction) +{ + struct dma_pdata *p = dma_get_drvdata(dma); + + status->state = p->chan[channel].status; + status->r_pos = host_dma_reg_read(dma, channel, DGBRP); + status->w_pos = host_dma_reg_read(dma, channel, DGBWP); + 
status->timestamp = timer_get_system(platform_timer); + + return 0; +} + +/* set the DMA channel configuration, source/target address, buffer sizes */ +static int hda_dma_set_config(struct dma *dma, int channel, + struct dma_sg_config *config) +{ + struct dma_pdata *p = dma_get_drvdata(dma); + struct list_item *plist; + struct dma_sg_elem *sg_elem; + uint32_t buffer_addr = 0; + uint32_t period_bytes = 0; + uint32_t buffer_bytes = 0; + uint32_t desc_count = 0; + uint32_t flags; + uint32_t addr; + uint32_t dgcs; + int ret = 0; + + spin_lock_irq(&dma->lock, flags); + + trace_host("Dsc"); + + /* get number of SG elems */ + list_for_item(plist, &config->elem_list) + desc_count++; + + if (desc_count == 0) { + trace_host_error("eD1"); + ret = -EINVAL; + goto out; + } + + /* default channel config */ + p->chan[channel].direction = config->direction; + p->chan[channel].desc_count = desc_count; + + + /* validate - HDA only supports continuous elems of same size */ + list_for_item(plist, &config->elem_list) { + sg_elem = container_of(plist, struct dma_sg_elem, list); + + if (config->direction == SOF_IPC_STREAM_PLAYBACK) + addr = sg_elem->dest; + else + addr = sg_elem->src; + + /* make sure elem is continuous */ + if (buffer_addr && (buffer_addr + buffer_bytes) != addr) { + trace_host_error("eD2"); + ret = -EINVAL; + goto out; + } + + /* make sure period_bytes are constant */ + if (period_bytes && period_bytes != sg_elem->size) { + trace_host_error("eD3"); + ret = -EINVAL; + goto out; + } + + /* update counters */ + period_bytes = sg_elem->size; + buffer_bytes += period_bytes; + + if (buffer_addr == 0) + buffer_addr = addr; + } + + /* firmware control buffer */ + dgcs = DGCS_FWCB; + /* set DGCS.SCS bit to 0 for 32 bit container */ + if ((config->direction == SOF_IPC_STREAM_PLAYBACK && + config->dest_width <= 16) || + (config->direction == SOF_IPC_STREAM_CAPTURE && + config->src_width <= 16)) + dgcs |= DGCS_SCS; + + /* init channel in HW */ + host_dma_reg_write(dma, channel, DGCS, dgcs); + host_dma_reg_write(dma, channel, DGBBA, buffer_addr); + host_dma_reg_write(dma, channel, DGBS, buffer_bytes); + host_dma_reg_write(dma, channel, DGBFPI, 0); + host_dma_reg_write(dma, channel, DGBSP, period_bytes); + host_dma_reg_write(dma, channel, DGMBS, period_bytes); + host_dma_reg_write(dma, channel, DGLLPI, 0); + host_dma_reg_write(dma, channel, DGLPIBI, 0); + + p->chan[channel].status = COMP_STATE_PREPARE; +out: + spin_unlock_irq(&dma->lock, flags); + return ret; +} + +/* restore DMA conext after leaving D3 */ +static int hda_dma_pm_context_restore(struct dma *dma) +{ + return 0; +} + +/* store DMA conext after leaving D3 */ +static int hda_dma_pm_context_store(struct dma *dma) +{ + return 0; +} + +static int hda_dma_set_cb(struct dma *dma, int channel, int type, + void (*cb)(void *data, uint32_t type, struct dma_sg_elem *next), + void *data) +{ + return -EINVAL; +} + +static int hda_dma_probe(struct dma *dma) +{ + struct dma_pdata *hda_pdata; + int i; + + /* allocate private data */ + hda_pdata = rzalloc(RZONE_SYS, RFLAGS_NONE, sizeof(*hda_pdata)); + dma_set_drvdata(dma, hda_pdata); + + spinlock_init(&dma->lock); + + /* init channel status */ + for (i = 0; i < HDA_DMA_MAX_CHANS; i++) + hda_pdata->chan[i].status = COMP_STATE_INIT; + + return 0; +} + +const struct dma_ops hda_host_dma_ops = { + .channel_get = hda_dma_channel_get, + .channel_put = hda_dma_channel_put, + .start = hda_dma_start, + .stop = hda_dma_stop, + .copy = hda_dma_copy, + .pause = hda_dma_pause, + .release = hda_dma_release, + .status = 
hda_dma_status, + .set_config = hda_dma_set_config, + .set_cb = hda_dma_set_cb, + .pm_context_restore = hda_dma_pm_context_restore, + .pm_context_store = hda_dma_pm_context_store, + .probe = hda_dma_probe, +}; + +const struct dma_ops hda_link_dma_ops = { + .channel_get = hda_dma_channel_get, + .channel_put = hda_dma_channel_put, + .start = hda_dma_start, + .stop = hda_dma_stop, + .copy = hda_dma_copy, + .pause = hda_dma_pause, + .release = hda_dma_release, + .status = hda_dma_status, + .set_config = hda_dma_set_config, + .set_cb = hda_dma_set_cb, + .pm_context_restore = hda_dma_pm_context_restore, + .pm_context_store = hda_dma_pm_context_store, + .probe = hda_dma_probe, +}; + diff --git a/src/platform/apollolake/platform.c b/src/platform/apollolake/platform.c index bb44634..555f14f 100644 --- a/src/platform/apollolake/platform.c +++ b/src/platform/apollolake/platform.c @@ -176,8 +176,7 @@ static struct timer platform_ext_timer = {
int platform_init(struct reef *reef) { - struct dma *dmac0; - struct dma *dmac1; + struct dma *dmac; struct dai *ssp2;
platform_interrupt_init(); @@ -232,15 +231,25 @@ int platform_init(struct reef *reef)
/* init DMACs */ trace_point(TRACE_BOOT_PLATFORM_DMA); - dmac0 = dma_get(DMA_GP_LP_DMAC0); - if (dmac0 == NULL) + dmac = dma_get(DMA_GP_LP_DMAC0); + if (dmac == NULL) return -ENODEV; - dma_probe(dmac0); + dma_probe(dmac);
- dmac1 = dma_get(DMA_GP_LP_DMAC1); - if (dmac1 == NULL) + dmac = dma_get(DMA_GP_LP_DMAC1); + if (dmac == NULL) return -ENODEV; - dma_probe(dmac1); + dma_probe(dmac); + + dmac = dma_get(DMA_HOST_OUT_DMAC); + if (dmac == NULL) + return -ENODEV; + dma_probe(dmac); + + dmac = dma_get(DMA_HOST_IN_DMAC); + if (dmac == NULL) + return -ENODEV; + dma_probe(dmac);
/* init SSP ports */ trace_point(TRACE_BOOT_PLATFORM_SSP);
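For reference, bringing up a single gateway stream with this driver follows the generic flow from dma.h. A minimal sketch; the caller is assumed to have already built a dma_sg_config describing one contiguous buffer of equal-sized periods, as hda_dma_set_config() requires:

#include <errno.h>
#include <reef/dma.h>
#include <platform/dma.h>

/* sketch: acquire, configure and start one host-input gateway stream */
static int start_host_in_stream(struct dma_sg_config *config, int stream)
{
	struct dma *dmac;
	int chan;
	int ret;

	dmac = dma_get(DMA_HOST_IN_DMAC);
	if (dmac == NULL)
		return -ENODEV;

	/* gateway channels map 1:1 onto host stream indexes */
	chan = dma_channel_get(dmac, stream);
	if (chan < 0)
		return chan;

	ret = dma_set_config(dmac, chan, config);
	if (ret < 0) {
		dma_channel_put(dmac, chan);
		return ret;
	}

	return dma_start(dmac, chan);
}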
From: Liam Girdwood liam.r.girdwood@linux.intel.com
The host GW DMA has no callback, so the host component has to manually notify the DMA driver when to copy each period.
Signed-off-by: Liam Girdwood liam.r.girdwood@linux.intel.com
Signed-off-by: Keyon Jie yang.jie@linux.intel.com
---
 src/audio/host.c | 201 ++++++++++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 176 insertions(+), 25 deletions(-)
diff --git a/src/audio/host.c b/src/audio/host.c index 0d0a7bb..17218fe 100644 --- a/src/audio/host.c +++ b/src/audio/host.c @@ -84,6 +84,9 @@ struct host_data { uint32_t period_bytes; uint32_t period_count;
+#if defined CONFIG_DMA_GW + uint32_t first_copy; +#endif /* stream info */ struct sof_ipc_stream_posn posn; /* TODO: update this */ }; @@ -101,6 +104,8 @@ static inline struct dma_sg_elem *next_buffer(struct hc_buf *hc) return elem; }
+#if !defined CONFIG_DMA_GW + /* * Host period copy between DSP and host DMA completion. * This is called by DMA driver every time when DMA completes its current @@ -226,6 +231,87 @@ static void host_dma_cb(void *data, uint32_t type, struct dma_sg_elem *next) wait_completed(&hd->complete); }
+#else + +static void host_gw_dma_update(struct comp_dev *dev) +{ + struct host_data *hd = comp_get_drvdata(dev); + struct dma_sg_elem *local_elem; + struct dma_sg_elem *source_elem; + struct dma_sg_elem *sink_elem; + struct comp_buffer *dma_buffer; + + local_elem = list_first_item(&hd->config.elem_list, + struct dma_sg_elem, list); + + tracev_host("upd"); + + /* update buffer positions */ + dma_buffer = hd->dma_buffer; + + if (dev->params.direction == SOF_IPC_STREAM_PLAYBACK) { + /* invalidate audio data */ + dcache_invalidate_region(dma_buffer->w_ptr, local_elem->size); + + /* recalc available buffer space */ + comp_update_buffer_produce(hd->dma_buffer, local_elem->size); + } else { + /* recalc available buffer space */ + comp_update_buffer_consume(hd->dma_buffer, local_elem->size); + + /* writeback audio data */ + dcache_writeback_region(dma_buffer->r_ptr, local_elem->size); + } + + dev->position += local_elem->size; + + /* new local period, update host buffer position blks */ + hd->local_pos += local_elem->size; + + /* buffer overlap, hard code host buffer size at the moment ? */ + if (hd->local_pos >= hd->host_size) + hd->local_pos = 0; + + /* send IPC message to driver if needed */ + hd->report_pos += local_elem->size; + /* update for host side */ + if (hd->host_pos) + *hd->host_pos = hd->local_pos; + + /* NO_IRQ mode if host_period_size == 0 */ + if (dev->params.host_period_bytes != 0 && + hd->report_pos >= dev->params.host_period_bytes) { + hd->report_pos = 0; + + /* send timestamps to host */ + pipeline_get_timestamp(dev->pipeline, dev, &hd->posn); + ipc_stream_send_position(dev, &hd->posn); + } + + /* update src/dest positions for local buf, and check for overflow */ + if (dev->params.direction == SOF_IPC_STREAM_PLAYBACK) { + local_elem->dest += local_elem->size; + if (local_elem->dest == hd->sink->current_end) { + /* end of elem, so use next */ + sink_elem = next_buffer(hd->sink); + hd->sink->current_end = sink_elem->dest + + sink_elem->size; + local_elem->dest = sink_elem->dest; + } + } else { + local_elem->src += local_elem->size; + if (local_elem->src == hd->source->current_end) { + /* end of elem, so use next */ + source_elem = next_buffer(hd->source); + hd->source->current_end = source_elem->src + + source_elem->size; + local_elem->src = source_elem->src; + } + } +} + +#endif + static struct comp_dev *host_new(struct sof_ipc_comp *comp) { struct comp_dev *dev; @@ -259,7 +345,14 @@ static struct comp_dev *host_new(struct sof_ipc_comp *comp)
comp_set_drvdata(dev, hd);
+#if !defined CONFIG_DMA_GW hd->dma = dma_get(ipc_host->dmac_id); +#else + if (ipc_host->direction == SOF_IPC_STREAM_PLAYBACK) + hd->dma = dma_get(DMA_HOST_OUT_DMAC); + else + hd->dma = dma_get(DMA_HOST_IN_DMAC); +#endif if (hd->dma == NULL) { trace_host_error("eDM"); goto error; @@ -271,6 +364,7 @@ static struct comp_dev *host_new(struct sof_ipc_comp *comp) list_init(&hd->local.elem_list); list_item_prepend(&elem->list, &hd->config.elem_list);
+#if !defined CONFIG_DMA_GW /* get DMA channel from DMAC */ hd->chan = dma_channel_get(hd->dma, 0); if (hd->chan < 0) { @@ -280,6 +374,7 @@ static struct comp_dev *host_new(struct sof_ipc_comp *comp)
/* set up callback */ dma_set_cb(hd->dma, hd->chan, DMA_IRQ_TYPE_LLIST, host_dma_cb, dev); +#endif
/* init posn data. TODO: other fields */ hd->posn.comp_id = comp->id; @@ -302,7 +397,10 @@ static void host_free(struct comp_dev *dev)
elem = list_first_item(&hd->config.elem_list, struct dma_sg_elem, list); + +#if !defined CONFIG_DMA_GW dma_channel_put(hd->dma, hd->chan); +#endif
rfree(elem); rfree(hd); @@ -334,6 +432,16 @@ static int create_local_elems(struct comp_dev *dev) e->size = hd->period_bytes;
list_item_append(&e->list, &hd->local.elem_list); +#if defined CONFIG_DMA_GW + /* + * for dma gateway, we don't allocate extra sg elements, so, + * just reuse local elements for config.elem_list. + * And, as the first element has been added at host_new, so + * add from the 2nd element here + */ + if (i >= 1) + list_item_append(&e->list, &hd->config.elem_list); +#endif }
return 0; @@ -448,14 +556,24 @@ static int host_params(struct comp_dev *dev) if (err < 0) return err;
- /* set up DMA configuration - copy in words for all formats as - this is most optimal for memory <-> memory copies. */ - config->src_width = sizeof(uint32_t); - config->dest_width = sizeof(uint32_t); + /* set up DMA configuration - copy in sample bytes. */ + config->src_width = comp_sample_bytes(dev); + config->dest_width = comp_sample_bytes(dev); config->cyclic = 0;
host_elements_reset(dev);
+#if defined CONFIG_DMA_GW + dev->params.stream_tag -= 1; + /* get DMA channel from DMAC */ + hd->chan = dma_channel_get(hd->dma, dev->params.stream_tag); + if (hd->chan < 0) { + trace_host_error("eDC"); + return -ENODEV; + } + dma_set_config(hd->dma, hd->chan, &hd->config); +#endif + return 0; }
@@ -477,6 +595,10 @@ static int host_prepare(struct comp_dev *dev) hd->split_remaining = 0; dev->position = 0;
+#if defined CONFIG_DMA_GW + hd->first_copy = 1; +#endif + return 0; }
@@ -522,7 +644,9 @@ static int host_position(struct comp_dev *dev, static int host_cmd(struct comp_dev *dev, int cmd, void *data) { int ret = 0; - +#if defined CONFIG_DMA_GW + struct host_data *hd = comp_get_drvdata(dev); +#endif trace_host("cmd");
ret = comp_set_state(dev, cmd); @@ -534,11 +658,23 @@ static int host_cmd(struct comp_dev *dev, int cmd, void *data) ret = host_stop(dev); break; case COMP_CMD_START: +#if defined CONFIG_DMA_GW + dma_start(hd->dma, hd->chan); +#endif /* preload first playback period for preloader task */ if (dev->params.direction == SOF_IPC_STREAM_PLAYBACK) { +#if !defined CONFIG_DMA_GW ret = host_copy(dev); if (ret == dev->frames) ret = 0; +#else + /* + * host dma will not start copy at this point yet, + * just produce empty period bytes for it. + */ + comp_update_buffer_produce(hd->dma_buffer, + hd->period_bytes); +#endif } break; default: @@ -590,6 +726,11 @@ static int host_reset(struct comp_dev *dev) rfree(e); }
+#if defined CONFIG_DMA_GW + dma_stop(hd->dma, hd->chan); + dma_channel_put(hd->dma, hd->chan); +#endif + host_pointer_reset(dev); hd->host_pos = NULL; hd->source = NULL; @@ -603,46 +744,56 @@ static int host_reset(struct comp_dev *dev) static int host_copy(struct comp_dev *dev) { struct host_data *hd = comp_get_drvdata(dev); - struct comp_buffer *dma_buffer; struct dma_sg_elem *local_elem;
- tracev_host("cpy"); + trace_host("cpy");
if (dev->state != COMP_STATE_ACTIVE) return 0;
+#if defined CONFIG_DMA_GW + if (hd->first_copy) { + /* + * host dma will not start copy at this point yet, just produce + * empty period bytes for it. + */ + comp_update_buffer_produce(hd->dma_buffer, + hd->period_bytes); + hd->first_copy = 0; + return 0; + } +#endif local_elem = list_first_item(&hd->config.elem_list, struct dma_sg_elem, list);
/* enough free or avail to copy ? */ if (dev->params.direction == SOF_IPC_STREAM_PLAYBACK) { - - dma_buffer = list_first_item(&dev->bsink_list, - struct comp_buffer, source_list); - - if (dma_buffer->free < local_elem->size) { - /* make sure there is free bytes for next period */ - trace_host_error("xro"); - comp_overrun(dev, dma_buffer, local_elem->size, 0); - return -EIO; + if (hd->dma_buffer->free < local_elem->size) { + /* buffer is enough avail, just return. */ + trace_host("Bea"); + return 0; } } else {
- dma_buffer = list_first_item(&dev->bsource_list, - struct comp_buffer, sink_list); - - if (dma_buffer->avail < local_elem->size) { - /* make sure there is avail bytes for next period */ - trace_host_error("xru"); - comp_underrun(dev, dma_buffer, local_elem->size, 0); - return -EIO; + if (hd->dma_buffer->avail < local_elem->size) { + /* buffer is enough empty, just return. */ + trace_host("Bee"); + return 0; } }
+#if defined CONFIG_DMA_GW + + /* update host pointers from last period */ + host_gw_dma_update(dev); + + /* tell gateway to copy another period */ + dma_copy(hd->dma, hd->chan, hd->period_bytes); +#else /* do DMA transfer */ dma_set_config(hd->dma, hd->chan, &hd->config); dma_start(hd->dma, hd->chan); - +#endif return dev->frames; }
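So, on the gateway path, the steady-state work per period reduces to two calls, condensed below from host_copy() above. This is a simplified restatement as it would sit inside host.c, not new functionality:

/* sketch: per-period handling once the gateway stream is running */
static int host_gw_copy_period(struct comp_dev *dev)
{
	struct host_data *hd = comp_get_drvdata(dev);

	/* fold the period the gateway just finished into the buffer state */
	host_gw_dma_update(dev);

	/* then tell the gateway it may move the next period */
	return dma_copy(hd->dma, hd->chan, hd->period_bytes);
}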
Fix an issue where DMA does not stop when using Hardware Link List mode. Only enable HW LLI for APL/CNL at the moment.
Signed-off-by: Keyon Jie yang.jie@linux.intel.com
---
 src/drivers/dw-dma.c | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/src/drivers/dw-dma.c b/src/drivers/dw-dma.c
index c566509..b370d32 100644
--- a/src/drivers/dw-dma.c
+++ b/src/drivers/dw-dma.c
@@ -236,8 +236,12 @@
 #define trace_dma_error(__e)	trace_error(TRACE_CLASS_DMA, __e)
 #define tracev_dma(__e)	tracev_event(TRACE_CLASS_DMA, __e)
 
-/* HW Linked list support currently disabled - needs debug for missing IRQs !!! */
+/* HW Linked list support, only enabled for APL/CNL at the moment */
+#if defined CONFIG_APOLLOLAKE || defined CONFIG_CANNONLAKE
+#define DW_USE_HW_LLI	1
+#else
 #define DW_USE_HW_LLI	0
+#endif
 
 /* number of tries to wait for reset */
 #define DW_DMA_CFG_TRIES	10000
@@ -922,6 +926,11 @@ static void dw_dma_irq_handler(void *data)
 			next.size = DMA_RELOAD_LLI;
 			p->chan[i].cb(p->chan[i].cb_data, DMA_IRQ_TYPE_BLOCK,
 				&next);
+			if (next.size == DMA_RELOAD_END) {
+				trace_dma("LSo");
+				/* disable channel, finished */
+				dw_write(dma, DW_DMA_CHAN_EN, CHAN_DISABLE(i));
+			}
 		}
 #endif
 		/* end of a transfer */
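The new disable path relies on the existing callback contract: a client that has no further data marks the final block by setting next->size to DMA_RELOAD_END, which the handler above now turns into a channel disable. A sketch of such a client callback:

#include <stdint.h>
#include <reef/dma.h>

/* sketch: end the transfer after the block that just completed */
static void one_shot_block_cb(void *data, uint32_t type,
	struct dma_sg_elem *next)
{
	if (type != DMA_IRQ_TYPE_BLOCK)
		return;

	/* no more data to queue - let dw-dma disable the channel */
	next->size = DMA_RELOAD_END;
}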
On Thu, 2018-02-08 at 20:48 +0800, Keyon Jie wrote:
> From: Liam Girdwood liam.r.girdwood@linux.intel.com
>
> This API allows clients to manually inform the DMAC when they need more data copied. The DMAC can then copy the desired amount of data.
>
> Signed-off-by: Liam Girdwood liam.r.girdwood@linux.intel.com
> ---
>  src/include/reef/dma.h | 6 ++++++
>  1 file changed, 6 insertions(+)
All applied with a minor build fix on BYT.
Thanks
Liam