[alsa-devel] [PATCH 0/4] ASoC: Introduce the new caching API
This patch series introduces the new caching API. The idea behind this caching interface is that we can provide different means of organizing and accessing the register cache. This is useful for large and sparse register maps, where one can use some kind of compression algorithm to reduce the memory footprint. The caching API is designed in such a way that no existing drivers need to be modified.
Things that still need to be done:
- Memory usage statistics, to make it easier to select the proper caching technique.
- Support for bulk reads/writes.
Dimitris Papastamos (4):
  ASoC: soc.h: Add new caching API prototypes and hooks
  ASoC: soc-cache: Add support for flat register caching
  ASoC: soc-cache: Add support for LZO register caching
  ASoC: soc-cache: Add support for rbtree based register caching
 include/sound/soc.h   |   28 ++
 sound/soc/Kconfig     |    2 +
 sound/soc/soc-cache.c |  988 +++++++++++++++++++++++++++++++++++++++++++++++--
 sound/soc/soc-core.c  |   38 +--
 4 files changed, 1002 insertions(+), 54 deletions(-)
Signed-off-by: Dimitris Papastamos dp@opensource.wolfsonmicro.com
---
 include/sound/soc.h |   26 ++++++++++++++++++++++++++
 sound/soc/Kconfig   |    2 ++
 2 files changed, 28 insertions(+), 0 deletions(-)
diff --git a/include/sound/soc.h b/include/sound/soc.h index aaf34d7..90a9b60 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -238,6 +238,7 @@ struct soc_enum; struct snd_soc_ac97_ops; struct snd_soc_jack; struct snd_soc_jack_pin; +struct snd_soc_cache_ops;
#ifdef CONFIG_GPIOLIB struct snd_soc_jack_gpio; @@ -253,6 +254,10 @@ enum snd_soc_control_type { SND_SOC_SPI, };
+enum snd_soc_compress_type { + SND_SOC_NO_COMPRESSION +}; + int snd_soc_register_platform(struct device *dev, struct snd_soc_platform_driver *platform_drv); void snd_soc_unregister_platform(struct device *dev); @@ -264,6 +269,13 @@ int snd_soc_codec_volatile_register(struct snd_soc_codec *codec, int reg); int snd_soc_codec_set_cache_io(struct snd_soc_codec *codec, int addr_bits, int data_bits, enum snd_soc_control_type control); +int snd_soc_cache_sync(struct snd_soc_codec *codec); +int snd_soc_cache_init(struct snd_soc_codec *codec); +int snd_soc_cache_exit(struct snd_soc_codec *codec); +int snd_soc_cache_write(struct snd_soc_codec *codec, + unsigned int reg, unsigned int value); +int snd_soc_cache_read(struct snd_soc_codec *codec, + unsigned int reg, unsigned int *value);
/* Utility functions to get clock rates from various things */ int snd_soc_calc_frame_size(int sample_size, int channels, int tdm_slots); @@ -420,6 +432,18 @@ struct snd_soc_ops { int (*trigger)(struct snd_pcm_substream *, int); };
+/* SoC cache ops */ +struct snd_soc_cache_ops { + int id; /* corresponds to snd_soc_compress_type */ + int (*init)(struct snd_soc_codec *codec); + int (*exit)(struct snd_soc_codec *codec); + int (*read)(struct snd_soc_codec *codec, unsigned int reg, + unsigned int *value); + int (*write)(struct snd_soc_codec *codec, unsigned int reg, + unsigned int value); + int (*sync)(struct snd_soc_codec *codec); +}; + /* SoC Audio Codec device */ struct snd_soc_codec { const char *name; @@ -450,6 +474,7 @@ struct snd_soc_codec { hw_write_t hw_write; unsigned int (*hw_read)(struct snd_soc_codec *, unsigned int); void *reg_cache; + const struct snd_soc_cache_ops *cache_ops;
/* dapm */ u32 pop_time; @@ -488,6 +513,7 @@ struct snd_soc_codec_driver { short reg_cache_step; short reg_word_size; const void *reg_cache_default; + enum snd_soc_compress_type compress_type;
/* codec bias level */ int (*set_bias_level)(struct snd_soc_codec *, diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig index 3e598e7..4562c89 100644 --- a/sound/soc/Kconfig +++ b/sound/soc/Kconfig @@ -4,6 +4,8 @@
menuconfig SND_SOC tristate "ALSA for SoC audio support" + select LZO_COMPRESS + select LZO_DECOMPRESS select SND_PCM select AC97_BUS if SND_SOC_AC97_BUS select SND_JACK if INPUT=y || INPUT=SND
On Thu, Nov 04, 2010 at 02:22:41PM +0000, Dimitris Papastamos wrote:
Signed-off-by: Dimitris Papastamos dp@opensource.wolfsonmicro.com
As previously mentioned, please split this series up into patches which individually add some useful functionality.
+/* SoC cache ops */ +struct snd_soc_cache_ops {
- int id; /* corresponds to snd_soc_compress_type */
Shouldn't this be using the enum then?
--- a/sound/soc/Kconfig +++ b/sound/soc/Kconfig @@ -4,6 +4,8 @@
menuconfig SND_SOC tristate "ALSA for SoC audio support"
- select LZO_COMPRESS
- select LZO_DECOMPRESS select SND_PCM select AC97_BUS if SND_SOC_AC97_BUS select SND_JACK if INPUT=y || INPUT=SND
This definitely looks like it should be in a separate patch.
This patch introduces the new caching API and migrates the old caching interface into the new one. The flat register caching technique does not use compression at all and it is equivalent to the old caching technique. One can still access codec->reg_cache directly but this is not advised as that will not be portable across different caching strategies.
None of the existing drivers need to be changed to adapt to this caching technique. There should be no noticeable overhead associated with using the new caching API.
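To make this concrete, here is a rough illustration (not part of the patch) of what an unmodified driver looks like under the new API: a codec driver that already provides reg_cache_size, reg_word_size and reg_cache_default keeps working as before, because a zeroed compress_type selects the flat cache. The driver name and register values below are invented for the example; portable code should go through snd_soc_cache_read()/snd_soc_cache_write() rather than poking codec->reg_cache.

/* Hypothetical codec driver - the name and defaults are illustrative only. */
static const u16 hypothetical_reg_defaults[] = {
	0x0000,		/* R0 */
	0x0123,		/* R1 */
	0x00ff,		/* R2 */
};

static struct snd_soc_codec_driver soc_codec_dev_hypothetical = {
	.reg_cache_size		= ARRAY_SIZE(hypothetical_reg_defaults),
	.reg_word_size		= sizeof(u16),
	.reg_cache_default	= hypothetical_reg_defaults,
	/*
	 * .compress_type is left at 0, i.e. SND_SOC_NO_COMPRESSION, so
	 * snd_soc_cache_init() sets up the flat cache and behaviour is
	 * the same as the old kmemdup()-based cache in soc-core.
	 */
};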
Signed-off-by: Dimitris Papastamos dp@opensource.wolfsonmicro.com
---
 sound/soc/soc-cache.c |  342 ++++++++++++++++++++++++++++++++++++++++++++-----
 sound/soc/soc-core.c  |   38 ++----
 2 files changed, 326 insertions(+), 54 deletions(-)
diff --git a/sound/soc/soc-cache.c b/sound/soc/soc-cache.c index 8785a0c..c5d05b8 100644 --- a/sound/soc/soc-cache.c +++ b/sound/soc/soc-cache.c @@ -15,10 +15,14 @@ #include <linux/spi/spi.h> #include <sound/soc.h>
+/* serialize access to *cache_read() and *cache_write() */ +static DEFINE_MUTEX(cache_rw_mutex); + static unsigned int snd_soc_4_12_read(struct snd_soc_codec *codec, unsigned int reg) { - u16 *cache = codec->reg_cache; + int ret; + unsigned int val;
if (reg >= codec->driver->reg_cache_size || snd_soc_codec_volatile_register(codec, reg)) { @@ -28,13 +32,15 @@ static unsigned int snd_soc_4_12_read(struct snd_soc_codec *codec, return codec->hw_read(codec, reg); }
- return cache[reg]; + ret = snd_soc_cache_read(codec, reg, &val); + if (ret < 0) + return -1; + return val; }
static int snd_soc_4_12_write(struct snd_soc_codec *codec, unsigned int reg, unsigned int value) { - u16 *cache = codec->reg_cache; u8 data[2]; int ret;
@@ -42,8 +48,11 @@ static int snd_soc_4_12_write(struct snd_soc_codec *codec, unsigned int reg, data[1] = value & 0x00ff;
if (!snd_soc_codec_volatile_register(codec, reg) && - reg < codec->driver->reg_cache_size) - cache[reg] = value; + reg < codec->driver->reg_cache_size) { + ret = snd_soc_cache_write(codec, reg, value); + if (ret < 0) + return -1; + }
if (codec->cache_only) { codec->cache_sync = 1; @@ -92,7 +101,8 @@ static int snd_soc_4_12_spi_write(void *control_data, const char *data, static unsigned int snd_soc_7_9_read(struct snd_soc_codec *codec, unsigned int reg) { - u16 *cache = codec->reg_cache; + int ret; + unsigned int val;
if (reg >= codec->driver->reg_cache_size || snd_soc_codec_volatile_register(codec, reg)) { @@ -102,13 +112,15 @@ static unsigned int snd_soc_7_9_read(struct snd_soc_codec *codec, return codec->hw_read(codec, reg); }
- return cache[reg]; + ret = snd_soc_cache_read(codec, reg, &val); + if (ret < 0) + return -1; + return val; }
static int snd_soc_7_9_write(struct snd_soc_codec *codec, unsigned int reg, unsigned int value) { - u16 *cache = codec->reg_cache; u8 data[2]; int ret;
@@ -116,8 +128,11 @@ static int snd_soc_7_9_write(struct snd_soc_codec *codec, unsigned int reg, data[1] = value & 0x00ff;
if (!snd_soc_codec_volatile_register(codec, reg) && - reg < codec->driver->reg_cache_size) - cache[reg] = value; + reg < codec->driver->reg_cache_size) { + ret = snd_soc_cache_write(codec, reg, value); + if (ret < 0) + return -1; + }
if (codec->cache_only) { codec->cache_sync = 1; @@ -166,16 +181,19 @@ static int snd_soc_7_9_spi_write(void *control_data, const char *data, static int snd_soc_8_8_write(struct snd_soc_codec *codec, unsigned int reg, unsigned int value) { - u8 *cache = codec->reg_cache; u8 data[2]; + int ret;
reg &= 0xff; data[0] = reg; data[1] = value & 0xff;
if (!snd_soc_codec_volatile_register(codec, reg) && - reg < codec->driver->reg_cache_size) - cache[reg] = value; + reg < codec->driver->reg_cache_size) { + ret = snd_soc_cache_write(codec, reg, value); + if (ret < 0) + return -1; + }
if (codec->cache_only) { codec->cache_sync = 1; @@ -191,7 +209,8 @@ static int snd_soc_8_8_write(struct snd_soc_codec *codec, unsigned int reg, static unsigned int snd_soc_8_8_read(struct snd_soc_codec *codec, unsigned int reg) { - u8 *cache = codec->reg_cache; + int ret; + unsigned int val;
reg &= 0xff; if (reg >= codec->driver->reg_cache_size || @@ -202,7 +221,10 @@ static unsigned int snd_soc_8_8_read(struct snd_soc_codec *codec, return codec->hw_read(codec, reg); }
- return cache[reg]; + ret = snd_soc_cache_read(codec, reg, &val); + if (ret < 0) + return -1; + return val; }
#if defined(CONFIG_SPI_MASTER) @@ -238,16 +260,19 @@ static int snd_soc_8_8_spi_write(void *control_data, const char *data, static int snd_soc_8_16_write(struct snd_soc_codec *codec, unsigned int reg, unsigned int value) { - u16 *reg_cache = codec->reg_cache; u8 data[3]; + int ret;
data[0] = reg; data[1] = (value >> 8) & 0xff; data[2] = value & 0xff;
if (!snd_soc_codec_volatile_register(codec, reg) && - reg < codec->driver->reg_cache_size) - reg_cache[reg] = value; + reg < codec->driver->reg_cache_size) { + ret = snd_soc_cache_write(codec, reg, value); + if (ret < 0) + return -1; + }
if (codec->cache_only) { codec->cache_sync = 1; @@ -263,7 +288,8 @@ static int snd_soc_8_16_write(struct snd_soc_codec *codec, unsigned int reg, static unsigned int snd_soc_8_16_read(struct snd_soc_codec *codec, unsigned int reg) { - u16 *cache = codec->reg_cache; + int ret; + unsigned int val;
if (reg >= codec->driver->reg_cache_size || snd_soc_codec_volatile_register(codec, reg)) { @@ -271,9 +297,12 @@ static unsigned int snd_soc_8_16_read(struct snd_soc_codec *codec, return -1;
return codec->hw_read(codec, reg); - } else { - return cache[reg]; } + + ret = snd_soc_cache_read(codec, reg, &val); + if (ret < 0) + return -1; + return val; }
#if defined(CONFIG_SPI_MASTER) @@ -412,7 +441,8 @@ static unsigned int snd_soc_16_8_read_i2c(struct snd_soc_codec *codec, static unsigned int snd_soc_16_8_read(struct snd_soc_codec *codec, unsigned int reg) { - u8 *cache = codec->reg_cache; + int ret; + unsigned int val;
reg &= 0xff; if (reg >= codec->driver->reg_cache_size || @@ -423,13 +453,15 @@ static unsigned int snd_soc_16_8_read(struct snd_soc_codec *codec, return codec->hw_read(codec, reg); }
- return cache[reg]; + ret = snd_soc_cache_read(codec, reg, &val); + if (ret < 0) + return -1; + return val; }
static int snd_soc_16_8_write(struct snd_soc_codec *codec, unsigned int reg, unsigned int value) { - u8 *cache = codec->reg_cache; u8 data[3]; int ret;
@@ -439,8 +471,11 @@ static int snd_soc_16_8_write(struct snd_soc_codec *codec, unsigned int reg,
reg &= 0xff; if (!snd_soc_codec_volatile_register(codec, reg) && - reg < codec->driver->reg_cache_size) - cache[reg] = value; + reg < codec->driver->reg_cache_size) { + ret = snd_soc_cache_write(codec, reg, value); + if (ret < 0) + return -1; + }
if (codec->cache_only) { codec->cache_sync = 1; @@ -524,7 +559,8 @@ static unsigned int snd_soc_16_16_read_i2c(struct snd_soc_codec *codec, static unsigned int snd_soc_16_16_read(struct snd_soc_codec *codec, unsigned int reg) { - u16 *cache = codec->reg_cache; + int ret; + unsigned int val;
if (reg >= codec->driver->reg_cache_size || snd_soc_codec_volatile_register(codec, reg)) { @@ -534,13 +570,16 @@ static unsigned int snd_soc_16_16_read(struct snd_soc_codec *codec, return codec->hw_read(codec, reg); }
- return cache[reg]; + ret = snd_soc_cache_read(codec, reg, &val); + if (ret < 0) + return -1; + + return val; }
static int snd_soc_16_16_write(struct snd_soc_codec *codec, unsigned int reg, unsigned int value) { - u16 *cache = codec->reg_cache; u8 data[4]; int ret;
@@ -550,8 +589,11 @@ static int snd_soc_16_16_write(struct snd_soc_codec *codec, unsigned int reg, data[3] = value & 0xff;
if (!snd_soc_codec_volatile_register(codec, reg) && - reg < codec->driver->reg_cache_size) - cache[reg] = value; + reg < codec->driver->reg_cache_size) { + ret = snd_soc_cache_write(codec, reg, value); + if (ret < 0) + return -1; + }
if (codec->cache_only) { codec->cache_sync = 1; @@ -680,6 +722,8 @@ int snd_soc_codec_set_cache_io(struct snd_soc_codec *codec, return -EINVAL; }
+ mutex_init(&cache_rw_mutex); + codec->driver->write = io_types[i].write; codec->driver->read = io_types[i].read;
@@ -712,3 +756,239 @@ int snd_soc_codec_set_cache_io(struct snd_soc_codec *codec, return 0; } EXPORT_SYMBOL_GPL(snd_soc_codec_set_cache_io); + +static int snd_soc_flat_cache_sync(struct snd_soc_codec *codec) +{ + int i; + struct snd_soc_codec_driver *codec_drv; + unsigned int val; + + codec_drv = codec->driver; + for (i = 0; i < codec_drv->reg_cache_size; ++i) { + snd_soc_cache_read(codec, i, &val); + if (codec_drv->reg_cache_default) { + switch (codec_drv->reg_word_size) { + case 1: { + const u8 *cache; + + cache = codec_drv->reg_cache_default; + if (cache[i] == val) + continue; + } + break; + case 2: { + const u16 *cache; + + cache = codec_drv->reg_cache_default; + if (cache[i] == val) + continue; + } + break; + default: + return -EINVAL; + } + } + snd_soc_write(codec, i, val); + dev_dbg(codec->dev, "Synced register %#x, value = %#x\n", + i, val); + } + return 0; +} + +static int snd_soc_flat_cache_write(struct snd_soc_codec *codec, + unsigned int reg, unsigned int value) +{ + switch (codec->driver->reg_word_size) { + case 1: { + u8 *cache; + + cache = codec->reg_cache; + cache[reg] = value; + } + break; + case 2: { + u16 *cache; + + cache = codec->reg_cache; + cache[reg] = value; + } + break; + default: + return -EINVAL; + } + + return 0; +} + +static int snd_soc_flat_cache_read(struct snd_soc_codec *codec, + unsigned int reg, unsigned int *value) +{ + switch (codec->driver->reg_word_size) { + case 1: { + u8 *cache; + + cache = codec->reg_cache; + *value = cache[reg]; + } + break; + case 2: { + u16 *cache; + + cache = codec->reg_cache; + *value = cache[reg]; + } + break; + default: + return -EINVAL; + } + + return 0; +} + +static int snd_soc_flat_cache_exit(struct snd_soc_codec *codec) +{ + kfree(codec->reg_cache); + return 0; +} + +static int snd_soc_flat_cache_init(struct snd_soc_codec *codec) +{ + struct snd_soc_codec_driver *codec_drv; + size_t reg_size; + + codec_drv = codec->driver; + reg_size = codec_drv->reg_cache_size * codec_drv->reg_word_size; + + if (codec_drv->reg_cache_default) + codec->reg_cache = kmemdup(codec_drv->reg_cache_default, + reg_size, GFP_KERNEL); + else + codec->reg_cache = kzalloc(reg_size, GFP_KERNEL); + if (!codec->reg_cache) + return -EINVAL; + + return 0; +} + +/* an array of all supported compression types */ +static const struct snd_soc_cache_ops cache_types[] = { + { + .id = SND_SOC_NO_COMPRESSION, + .init = snd_soc_flat_cache_init, + .exit = snd_soc_flat_cache_exit, + .read = snd_soc_flat_cache_read, + .write = snd_soc_flat_cache_write, + .sync = snd_soc_flat_cache_sync + } +}; + +int snd_soc_cache_init(struct snd_soc_codec *codec) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(cache_types); ++i) + if (cache_types[i].id == codec->driver->compress_type) + break; + if (i == ARRAY_SIZE(cache_types)) { + dev_err(codec->dev, "Could not match compress type: %d\n", + codec->driver->compress_type); + return -EINVAL; + } + + codec->cache_ops = &cache_types[i]; + + if (codec->cache_ops->init) + return codec->cache_ops->init(codec); + return -EINVAL; +} +EXPORT_SYMBOL_GPL(snd_soc_cache_init); + +/* + * NOTE: keep in mind that this function might be called + * multiple times. + */ +int snd_soc_cache_exit(struct snd_soc_codec *codec) +{ + if (codec->cache_ops && codec->cache_ops->exit) + return codec->cache_ops->exit(codec); + return -EINVAL; +} +EXPORT_SYMBOL_GPL(snd_soc_cache_exit); + +/** + * snd_soc_cache_read: Fetch the value of a given register from the cache. + * + * @codec: CODEC to configure. + * @reg: The register index. 
+ * @value: The value to be returned. + */ +int snd_soc_cache_read(struct snd_soc_codec *codec, + unsigned int reg, unsigned int *value) +{ + int ret; + + mutex_lock(&cache_rw_mutex); + + if (value && codec->cache_ops && codec->cache_ops->read) { + ret = codec->cache_ops->read(codec, reg, value); + mutex_unlock(&cache_rw_mutex); + return ret; + } + + mutex_unlock(&cache_rw_mutex); + return -EINVAL; +} +EXPORT_SYMBOL_GPL(snd_soc_cache_read); + +/** + * snd_soc_cache_write: Set the value of a given register in the cache. + * + * @codec: CODEC to configure. + * @reg: The register index. + * @value: The new register value. + */ +int snd_soc_cache_write(struct snd_soc_codec *codec, + unsigned int reg, unsigned int value) +{ + int ret; + + mutex_lock(&cache_rw_mutex); + + if (codec->cache_ops && codec->cache_ops->write) { + ret = codec->cache_ops->write(codec, reg, value); + mutex_unlock(&cache_rw_mutex); + return ret; + } + + mutex_unlock(&cache_rw_mutex); + return -EINVAL; +} +EXPORT_SYMBOL_GPL(snd_soc_cache_write); + +/** + * snd_soc_cache_sync: Sync the register cache with the hardware. + * + * @codec: CODEC to configure. + * + * Any registers that should not be synced should be marked as + * volatile. In general drivers can choose not to use the provided + * syncing functionality if they so require. + */ +int snd_soc_cache_sync(struct snd_soc_codec *codec) +{ + int ret; + + if (!codec->cache_sync) { + return 0; + } + + if (codec->cache_ops && codec->cache_ops->sync) { + ret = codec->cache_ops->sync(codec); + if (!ret) + codec->cache_sync = 0; + return ret; + } + + return -EINVAL; +} +EXPORT_SYMBOL_GPL(snd_soc_cache_sync); diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 4360436..ab84a34 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -3236,23 +3236,6 @@ int snd_soc_register_codec(struct device *dev, return -ENOMEM; }
- /* allocate CODEC register cache */ - if (codec_drv->reg_cache_size && codec_drv->reg_word_size) { - - if (codec_drv->reg_cache_default) - codec->reg_cache = kmemdup(codec_drv->reg_cache_default, - codec_drv->reg_cache_size * codec_drv->reg_word_size, GFP_KERNEL); - else - codec->reg_cache = kzalloc(codec_drv->reg_cache_size * - codec_drv->reg_word_size, GFP_KERNEL); - - if (codec->reg_cache == NULL) { - kfree(codec->name); - kfree(codec); - return -ENOMEM; - } - } - codec->dev = dev; codec->driver = codec_drv; codec->bias_level = SND_SOC_BIAS_OFF; @@ -3261,6 +3244,16 @@ int snd_soc_register_codec(struct device *dev, INIT_LIST_HEAD(&codec->dapm_widgets); INIT_LIST_HEAD(&codec->dapm_paths);
+ /* allocate CODEC register cache */ + if (codec_drv->reg_cache_size && codec_drv->reg_word_size) { + ret = snd_soc_cache_init(codec); + if (ret < 0) { + dev_err(codec->dev, "Failed to set cache compression type: %d\n", + ret); + goto error_cache; + } + } + for (i = 0; i < num_dai; i++) { fixup_codec_formats(&dai_drv[i].playback); fixup_codec_formats(&dai_drv[i].capture); @@ -3270,7 +3263,7 @@ int snd_soc_register_codec(struct device *dev, if (num_dai) { ret = snd_soc_register_dais(dev, dai_drv, num_dai); if (ret < 0) - goto error; + goto error_dais; }
mutex_lock(&client_mutex); @@ -3281,12 +3274,12 @@ int snd_soc_register_codec(struct device *dev, pr_debug("Registered codec '%s'\n", codec->name); return 0;
-error: +error_dais: for (i--; i >= 0; i--) snd_soc_unregister_dai(dev);
- if (codec->reg_cache) - kfree(codec->reg_cache); + snd_soc_cache_exit(codec); +error_cache: kfree(codec->name); kfree(codec); return ret; @@ -3320,8 +3313,7 @@ found:
pr_debug("Unregistered codec '%s'\n", codec->name);
- if (codec->reg_cache) - kfree(codec->reg_cache); + snd_soc_cache_exit(codec); kfree(codec->name); kfree(codec); }
On Thu, Nov 04, 2010 at 02:22:42PM +0000, Dimitris Papastamos wrote:
This patch introduces the new caching API and migrates the old caching interface into the new one. The flat register caching technique does not use compression at all and it is equivalent to
Looks good - probably the main thing I'm asking for with the single patches is to do stuff like squashing the first patch into this.
@@ -680,6 +722,8 @@ int snd_soc_codec_set_cache_io(struct snd_soc_codec *codec, return -EINVAL; }
- mutex_init(&cache_rw_mutex);
I'd kind of expect this to be with the other cache setup? I'd also expect the lock to be a member variable next to the cache, rather than a global. Probably not a big deal but still nicer.
switch (codec_drv->reg_word_size) {
...
default:
return -EINVAL;
}
I'd kind of expect these to be BUG() so we don't silently fall back to direct I/O if the cache support isn't implemented at all.
+static int snd_soc_flat_cache_init(struct snd_soc_codec *codec) +{
- struct snd_soc_codec_driver *codec_drv;
- size_t reg_size;
- codec_drv = codec->driver;
- reg_size = codec_drv->reg_cache_size * codec_drv->reg_word_size;
- if (codec_drv->reg_cache_default)
codec->reg_cache = kmemdup(codec_drv->reg_cache_default,
reg_size, GFP_KERNEL);
- else
codec->reg_cache = kzalloc(reg_size, GFP_KERNEL);
- if (!codec->reg_cache)
return -EINVAL;
-ENOMEM.
+int snd_soc_cache_init(struct snd_soc_codec *codec) +{
+EXPORT_SYMBOL_GPL(snd_soc_cache_init);
Does this need to be exported? Right now the only caller is in the core.
@@ -3261,6 +3244,16 @@ int snd_soc_register_codec(struct device *dev, INIT_LIST_HEAD(&codec->dapm_widgets); INIT_LIST_HEAD(&codec->dapm_paths);
- /* allocate CODEC register cache */
- if (codec_drv->reg_cache_size && codec_drv->reg_word_size) {
ret = snd_soc_cache_init(codec);
if (ret < 0) {
dev_err(codec->dev, "Failed to set cache compression type: %d\n",
ret);
goto error_cache;
}
- }
Are you sure that all the CODECs that rely on the existing shared register cache are going to call this?
On Thu, 2010-11-04 at 14:31 -0400, Mark Brown wrote:
@@ -680,6 +722,8 @@ int snd_soc_codec_set_cache_io(struct snd_soc_codec *codec, return -EINVAL; }
- mutex_init(&cache_rw_mutex);
I'd kind of expect this to be with the other cache setup?
Do you mean that the mutex should also be used with the other caching techniques? That is not needed because we currently lock at a higher level, in the function that delegates the calls to the implementation functions.
@@ -3261,6 +3244,16 @@ int snd_soc_register_codec(struct device *dev, INIT_LIST_HEAD(&codec->dapm_widgets); INIT_LIST_HEAD(&codec->dapm_paths);
- /* allocate CODEC register cache */
- if (codec_drv->reg_cache_size && codec_drv->reg_word_size) {
ret = snd_soc_cache_init(codec);
if (ret < 0) {
dev_err(codec->dev, "Failed to set cache compression type: %d\n",
ret);
goto error_cache;
}
- }
Are you sure that all the CODECs that rely on the existing shared register cache are going to call this?
What do you mean by 'shared register cache'? Each codec gets its own copy of its register cache.
Any CODEC driver that calls snd_soc_register_codec() and has provided reg_cache_size and reg_word_size will have soc-core set up its cache accordingly. By default the provided snd_soc_codec_driver is zeroed out, so its compress_type will default to the flat compression type.
Thanks, Dimitrios
On Fri, Nov 05, 2010 at 09:34:37AM +0000, Dimitris Papastamos wrote:
On Thu, 2010-11-04 at 14:31 -0400, Mark Brown wrote:
- mutex_init(&cache_rw_mutex);
I'd kind of expect this to be with the other cache setup?
Do you mean that the mutex should also be used with the other caching techniques? That is not needed because we currently lock at a higher level, in the function that delegates the calls to the implementation functions.
I'd expect this to be with the rest of the initialisation for the structure that it's embedded in - having this be initialised in this place separately to anything else feels wrong. Of course at the minute it's not in a structure (which I raised as an issue as well IIRC) which means that we'll have an issue with multiple initialisation if two devices are registered.
Are you sure that all the CODECs that rely on the existing shared register cache are going to call this?
What do you mean by 'shared register cache'? Each codec gets its own copy of their register cache.
The shared register cache support code.
Any CODEC driver that calls snd_soc_register_codec() and has provided reg_cache_size and reg_word_size will have soc-core setting up its cache accordingly. By default the provided snd_soc_codec_driver is zero-ed out, so its compress_type will default to the flat compression type.
Are you absolutely positive that every user of the code is using a register cache initialised using that method?
On Fri, 2010-11-05 at 09:31 -0400, Mark Brown wrote:
On Fri, Nov 05, 2010 at 09:34:37AM +0000, Dimitris Papastamos wrote:
On Thu, 2010-11-04 at 14:31 -0400, Mark Brown wrote:
- mutex_init(&cache_rw_mutex);
I'd kind of expect this to be with the other cache setup?
Do you mean that the mutex should also be used with the other caching techniques? That is not needed because we currently lock at a higher level, in the function that delegates the calls to the implementation functions.
I'd expect this to be with the rest of the initialisation for the structure that it's embedded in - having this be initialised in this place separately to anything else feels wrong. Of course at the minute it's not in a structure (which I raised as an issue as well IIRC) which means that we'll have an issue with multiple initialisation if two devices are registered.
Aw yes, I've made the mutex per codec, so it's initialized in snd_soc_cache_init() for each codec that's registered.
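For reference, a rough sketch of what that per-codec arrangement could look like; the member name and its placement are assumptions, not the final patch:

/* Sketch only: the lock lives next to the cache it protects. */
struct snd_soc_codec {
	/* ... */
	void *reg_cache;
	struct mutex cache_rw_mutex;	/* serializes cache_read()/cache_write() */
	const struct snd_soc_cache_ops *cache_ops;
	/* ... */
};

int snd_soc_cache_init(struct snd_soc_codec *codec)
{
	/* ... pick codec->cache_ops from cache_types[] as before ... */

	mutex_init(&codec->cache_rw_mutex);

	if (codec->cache_ops->init)
		return codec->cache_ops->init(codec);
	return -EINVAL;
}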
Any CODEC driver that calls snd_soc_register_codec() and has provided reg_cache_size and reg_word_size will have soc-core setting up its cache accordingly. By default the provided snd_soc_codec_driver is zero-ed out, so its compress_type will default to the flat compression type.
Are you absolutely positive that every user of the code is using a register cache initialised using that method?
If people don't want to use our caching API, they will have to manage that locally somewhere in the driver code. In that scenario, they will not call snd_soc_codec_set_cache_io() and they will have to set codec->read() and codec->write() to point to their own implementation. They should also not set reg_cache_size and reg_word_size.
The previous caching code had the same issue. If someone called snd_soc_codec_set_cache_io() and they had not provided a valid reg_cache_size and reg_word_size (both of them were zero), the underlying codec->reg_cache would have been NULL and therefore any read()/write() operations going through soc-cache.c would result in an invalid access.
Thanks, Dimitrios
On Fri, Nov 05, 2010 at 01:59:51PM +0000, Dimitris Papastamos wrote:
On Fri, 2010-11-05 at 09:31 -0400, Mark Brown wrote:
Are you absolutely positive that every user of the code is using a register cache initialised using that method?
If people don't want to use our caching API, then if they need so they will have to manage that locally somewhere in the driver code. In that scenario, they will not call snd_soc_codec_set_cache_io() and they will have to set the codec->read() and codec->write() to point to their own implementation. They should also not set reg_cache_size and reg_word_size.
My concern is that since nothing stops people mix'n'matching currently someone may be using one bit and not the other - we need to audit all the current drivers to make sure that nothing is going to explode.
The previous caching code had the same issue. If someone called snd_soc_codec_set_cache_io() and they had not provided a valid reg_cache_size and reg_word_size (both of them were zero), the underlying codec->reg_cache would have been NULL and therefore any read()/write() operations going through soc-cache.c would result in an invalid access.
They could be setting these up later, or otherwise initialising. It's likely that nobody is but it's ringing an alarm bell that we need to audit in case there's something that's going to bite us later on.
On Fri, 2010-11-05 at 10:07 -0400, Mark Brown wrote:
On Fri, Nov 05, 2010 at 01:59:51PM +0000, Dimitris Papastamos wrote:
On Fri, 2010-11-05 at 09:31 -0400, Mark Brown wrote:
Are you absolutely positive that every user of the code is using a register cache initialised using that method?
If people don't want to use our caching API, then if they need so they will have to manage that locally somewhere in the driver code. In that scenario, they will not call snd_soc_codec_set_cache_io() and they will have to set the codec->read() and codec->write() to point to their own implementation. They should also not set reg_cache_size and reg_word_size.
My concern is that since nothing stops people mix'n'matching currently someone may be using one bit and not the other - we need to audit all the current drivers to make sure that nothing is going to explode.
The previous caching code had the same issue. If someone called snd_soc_codec_set_cache_io() and they had not provided a valid reg_cache_size and reg_word_size (both of them were zero), the underlying codec->reg_cache would have been NULL and therefore any read()/write() operations going through soc-cache.c would result in an invalid access.
They could be setting these up later, or otherwise initialising. It's likely that nobody is but it's ringing an alarm bell that we need to audit in case there's something that's going to bite us later on.
Yup sure, it'd be wise to audit the codecs for any invalid use of the caching API.
Thanks, Dimitrios
On Fri, Nov 05, 2010 at 02:12:00PM +0000, Dimitris Papastamos wrote:
On Fri, 2010-11-05 at 10:07 -0400, Mark Brown wrote:
They could be setting these up later, or otherwise initialising. It's likely that nobody is but it's ringing an alarm bell that we need to audit in case there's something that's going to bite us later on.
Yup sure, it'd be wise to audit the codecs for any invalid use of the caching API.
Thanks.
On Thu, 2010-11-04 at 14:31 -0400, Mark Brown wrote:
@@ -680,6 +722,8 @@ int snd_soc_codec_set_cache_io(struct snd_soc_codec *codec, return -EINVAL; }
- mutex_init(&cache_rw_mutex);
I'd kind of expect this to be with the other cache setup?
Aw yes, I'll move it into the snd_soc_cache_init() function.
Thanks, Dimitrios
This patch adds support for LZO compression when storing the register cache. The initial register defaults cache is marked as __devinitconst and the only change required for a driver to use LZO compression is to set the compress_type member in codec->driver to SND_SOC_LZO_COMPRESSION.
For a typical device whose register map would normally occupy 25kB or 50kB, using the LZO compression technique one can get down to ~3-5kB. There might be a performance penalty associated with each individual read/write due to decompressing/compressing the underlying cache; however, that should not be noticeable. The memory benefits depend on whether the target architecture can get rid of the memory occupied by the original register defaults cache which is marked as __devinitconst.
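As a hedged example of the opt-in described above (the driver name and register values are invented), the only driver-side change would be along these lines:

/*
 * Mark the defaults table __devinitconst so the architecture may discard
 * it after probe, and ask the core for LZO compression of the runtime cache.
 */
static const u16 hypothetical_reg_defaults[] __devinitconst = {
	0x0000,		/* R0 */
	0x0123,		/* R1 */
};

static struct snd_soc_codec_driver soc_codec_dev_hypothetical = {
	.reg_cache_size		= ARRAY_SIZE(hypothetical_reg_defaults),
	.reg_word_size		= sizeof(u16),
	.reg_cache_default	= hypothetical_reg_defaults,
	.compress_type		= SND_SOC_LZO_COMPRESSION,
};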
Signed-off-by: Dimitris Papastamos dp@opensource.wolfsonmicro.com
---
 include/sound/soc.h   |    3 +-
 sound/soc/soc-cache.c |  413 +++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 415 insertions(+), 1 deletions(-)
diff --git a/include/sound/soc.h b/include/sound/soc.h index 90a9b60..8135b3c 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -255,7 +255,8 @@ enum snd_soc_control_type { };
enum snd_soc_compress_type { - SND_SOC_NO_COMPRESSION + SND_SOC_NO_COMPRESSION, + SND_SOC_LZO_COMPRESSION };
int snd_soc_register_platform(struct device *dev, diff --git a/sound/soc/soc-cache.c b/sound/soc/soc-cache.c index c5d05b8..63b7fe9 100644 --- a/sound/soc/soc-cache.c +++ b/sound/soc/soc-cache.c @@ -14,6 +14,8 @@ #include <linux/i2c.h> #include <linux/spi/spi.h> #include <sound/soc.h> +#include <linux/lzo.h> +#include <linux/bitmap.h>
/* serialize access to *cache_read() and *cache_write() */ static DEFINE_MUTEX(cache_rw_mutex); @@ -757,6 +759,409 @@ int snd_soc_codec_set_cache_io(struct snd_soc_codec *codec, } EXPORT_SYMBOL_GPL(snd_soc_codec_set_cache_io);
+struct snd_soc_lzo_ctx { + void *wmem; + void *dst; + const void *src; + size_t src_len; + size_t dst_len; + size_t decompressed_size; + unsigned long *sync_bmp; + int sync_bmp_nbits; +}; + +#define LZO_BLOCK_NUM 8 +static int snd_soc_lzo_block_count(void) +{ + return LZO_BLOCK_NUM; +} + +static int snd_soc_lzo_prepare(struct snd_soc_lzo_ctx *lzo_ctx) +{ + lzo_ctx->wmem = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL); + if (!lzo_ctx->wmem) + return -ENOMEM; + return 0; +} + +static int snd_soc_lzo_compress(struct snd_soc_lzo_ctx *lzo_ctx) +{ + size_t compress_size; + int ret; + + ret = lzo1x_1_compress(lzo_ctx->src, lzo_ctx->src_len, + lzo_ctx->dst, &compress_size, lzo_ctx->wmem); + if (ret != LZO_E_OK || compress_size > lzo_ctx->dst_len) + return -EINVAL; + lzo_ctx->dst_len = compress_size; + return 0; +} + +static int snd_soc_lzo_decompress(struct snd_soc_lzo_ctx *lzo_ctx) +{ + size_t dst_len; + int ret; + + dst_len = lzo_ctx->dst_len; + ret = lzo1x_decompress_safe(lzo_ctx->src, lzo_ctx->src_len, + lzo_ctx->dst, &dst_len); + if (ret != LZO_E_OK || dst_len != lzo_ctx->dst_len) + return -EINVAL; + return 0; +} + +static int snd_soc_lzo_compress_cache_block(struct snd_soc_codec *codec, + struct snd_soc_lzo_ctx *lzo_ctx) +{ + int ret; + + lzo_ctx->dst_len = lzo1x_worst_compress(PAGE_SIZE); + lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL); + if (!lzo_ctx->dst) { + lzo_ctx->dst_len = 0; + return -ENOMEM; + } + + ret = snd_soc_lzo_compress(lzo_ctx); + if (ret < 0) + return ret; + return 0; +} + +static int snd_soc_lzo_decompress_cache_block(struct snd_soc_codec *codec, + struct snd_soc_lzo_ctx *lzo_ctx) +{ + int ret; + + lzo_ctx->dst_len = lzo_ctx->decompressed_size; + lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL); + if (!lzo_ctx->dst) { + lzo_ctx->dst_len = 0; + return -ENOMEM; + } + + ret = snd_soc_lzo_decompress(lzo_ctx); + if (ret < 0) + return ret; + return 0; +} + +static inline int snd_soc_lzo_get_blkindex(struct snd_soc_codec *codec, + unsigned int reg) +{ + struct snd_soc_codec_driver *codec_drv; + size_t reg_size; + + codec_drv = codec->driver; + reg_size = codec_drv->reg_cache_size * codec_drv->reg_word_size; + return (reg * codec_drv->reg_word_size) / + DIV_ROUND_UP(reg_size, snd_soc_lzo_block_count()); +} + +static inline int snd_soc_lzo_get_blkpos(struct snd_soc_codec *codec, + unsigned int reg) +{ + struct snd_soc_codec_driver *codec_drv; + size_t reg_size; + + codec_drv = codec->driver; + reg_size = codec_drv->reg_cache_size * codec_drv->reg_word_size; + return reg % (DIV_ROUND_UP(reg_size, snd_soc_lzo_block_count()) / + codec_drv->reg_word_size); +} + +static inline int snd_soc_lzo_get_blksize(struct snd_soc_codec *codec) +{ + struct snd_soc_codec_driver *codec_drv; + size_t reg_size; + + codec_drv = codec->driver; + reg_size = codec_drv->reg_cache_size * codec_drv->reg_word_size; + return DIV_ROUND_UP(reg_size, snd_soc_lzo_block_count()); +} + +static int snd_soc_lzo_cache_sync(struct snd_soc_codec *codec) +{ + struct snd_soc_lzo_ctx **lzo_blocks; + unsigned int val; + int i; + + lzo_blocks = codec->reg_cache; + for_each_set_bit(i, lzo_blocks[0]->sync_bmp, lzo_blocks[0]->sync_bmp_nbits) { + snd_soc_cache_read(codec, i, &val); + snd_soc_write(codec, i, val); + dev_dbg(codec->dev, "Synced register %#x, value = %#x\n", + i, val); + } + + return 0; +} + +static int snd_soc_lzo_cache_write(struct snd_soc_codec *codec, + unsigned int reg, unsigned int value) +{ + struct snd_soc_lzo_ctx *lzo_block, **lzo_blocks; + int ret, blkindex, blkpos; + size_t blksize, tmp_dst_len; + 
void *tmp_dst; + + /* index of the compressed lzo block */ + blkindex = snd_soc_lzo_get_blkindex(codec, reg); + /* register index within the decompressed block */ + blkpos = snd_soc_lzo_get_blkpos(codec, reg); + /* size of the compressed block */ + blksize = snd_soc_lzo_get_blksize(codec); + lzo_blocks = codec->reg_cache; + lzo_block = lzo_blocks[blkindex]; + + /* save the pointer and length of the compressed block */ + tmp_dst = lzo_block->dst; + tmp_dst_len = lzo_block->dst_len; + + /* prepare the source to be the compressed block */ + lzo_block->src = lzo_block->dst; + lzo_block->src_len = lzo_block->dst_len; + + /* decompress the block */ + ret = snd_soc_lzo_decompress_cache_block(codec, lzo_block); + if (ret < 0) { + kfree(lzo_block->dst); + goto out; + } + + /* write the new value to the cache */ + switch (codec->driver->reg_word_size) { + case 1: { + u8 *cache; + cache = lzo_block->dst; + if (cache[blkpos] == value) { + kfree(lzo_block->dst); + goto out; + } + cache[blkpos] = value; + } + break; + case 2: { + u16 *cache; + cache = lzo_block->dst; + if (cache[blkpos] == value) { + kfree(lzo_block->dst); + goto out; + } + cache[blkpos] = value; + } + break; + default: + return -EINVAL; + } + + /* prepare the source to be the decompressed block */ + lzo_block->src = lzo_block->dst; + lzo_block->src_len = lzo_block->dst_len; + + /* compress the block */ + ret = snd_soc_lzo_compress_cache_block(codec, lzo_block); + if (ret < 0) { + kfree(lzo_block->dst); + kfree(lzo_block->src); + goto out; + } + + /* set the bit so we know we have to sync this register */ + set_bit(reg, lzo_block->sync_bmp); + kfree(tmp_dst); + kfree(lzo_block->src); + return 0; +out: + lzo_block->dst = tmp_dst; + lzo_block->dst_len = tmp_dst_len; + return ret; +} + +static int snd_soc_lzo_cache_read(struct snd_soc_codec *codec, + unsigned int reg, unsigned int *value) +{ + struct snd_soc_lzo_ctx *lzo_block, **lzo_blocks; + int ret, blkindex, blkpos; + size_t blksize, tmp_dst_len; + void *tmp_dst; + + *value = 0; + /* index of the compressed lzo block */ + blkindex = snd_soc_lzo_get_blkindex(codec, reg); + /* register index within the decompressed block */ + blkpos = snd_soc_lzo_get_blkpos(codec, reg); + /* size of the compressed block */ + blksize = snd_soc_lzo_get_blksize(codec); + lzo_blocks = codec->reg_cache; + lzo_block = lzo_blocks[blkindex]; + + /* save the pointer and length of the compressed block */ + tmp_dst = lzo_block->dst; + tmp_dst_len = lzo_block->dst_len; + + /* prepare the source to be the compressed block */ + lzo_block->src = lzo_block->dst; + lzo_block->src_len = lzo_block->dst_len; + + /* decompress the block */ + ret = snd_soc_lzo_decompress_cache_block(codec, lzo_block); + if (ret >= 0) { + /* fetch the value from the cache */ + switch (codec->driver->reg_word_size) { + case 1: { + u8 *cache; + cache = lzo_block->dst; + *value = cache[blkpos]; + } + break; + case 2: { + u16 *cache; + cache = lzo_block->dst; + *value = cache[blkpos]; + } + break; + default: + return -EINVAL; + } + } + + kfree(lzo_block->dst); + /* restore the pointer and length of the compressed block */ + lzo_block->dst = tmp_dst; + lzo_block->dst_len = tmp_dst_len; + return 0; +} + +static int snd_soc_lzo_cache_exit(struct snd_soc_codec *codec) +{ + struct snd_soc_lzo_ctx **lzo_blocks; + int i, blkcount; + + lzo_blocks = codec->reg_cache; + if (!lzo_blocks) + return 0; + + blkcount = snd_soc_lzo_block_count(); + /* + * the pointer to the bitmap used for syncing the cache + * is shared amongst all lzo_blocks. 
Ensure it is freed + * only once. + */ + if (lzo_blocks[0]) + kfree(lzo_blocks[0]->sync_bmp); + for (i = 0; i < blkcount; ++i) { + if (lzo_blocks[i]) { + kfree(lzo_blocks[i]->wmem); + kfree(lzo_blocks[i]->dst); + } + /* each lzo_block is a pointer returned by kmalloc or NULL */ + kfree(lzo_blocks[i]); + } + kfree(lzo_blocks); + codec->reg_cache = NULL; + return 0; +} + +static int snd_soc_lzo_cache_init(struct snd_soc_codec *codec) +{ + struct snd_soc_lzo_ctx **lzo_blocks; + size_t reg_size, bmp_size; + struct snd_soc_codec_driver *codec_drv; + int ret, tofree, i, blksize, blkcount; + const char *p, *end; + unsigned long *sync_bmp; + + ret = 0; + codec_drv = codec->driver; + reg_size = codec_drv->reg_cache_size * codec_drv->reg_word_size; + + /* + * If we have not been given a default register cache + * then allocate a dummy zero-ed out region, compress it + * and remember to free it afterwards. + */ + tofree = 0; + if (!codec_drv->reg_cache_default) + tofree = 1; + + if (!codec_drv->reg_cache_default) { + codec_drv->reg_cache_default = kzalloc(reg_size, + GFP_KERNEL); + if (!codec_drv->reg_cache_default) + return -ENOMEM; + } + + blkcount = snd_soc_lzo_block_count(); + codec->reg_cache = kzalloc(blkcount * sizeof(*lzo_blocks), + GFP_KERNEL); + if (!codec->reg_cache) { + ret = -ENOMEM; + goto err_tofree; + } + lzo_blocks = codec->reg_cache; + + /* + * allocate a bitmap to be used when syncing the cache with + * the hardware. Each time a register is modified, the corresponding + * bit is set in the bitmap, so we know that we have to sync + * that register. + */ + bmp_size = codec_drv->reg_cache_size; + sync_bmp = kmalloc(BITS_TO_LONGS(bmp_size) * sizeof(unsigned long), + GFP_KERNEL); + if (!sync_bmp) { + ret = -ENOMEM; + goto err; + } + bitmap_zero(sync_bmp, reg_size); + + /* allocate the lzo blocks and initialize them */ + for (i = 0; i < blkcount; ++i) { + lzo_blocks[i] = kzalloc(sizeof **lzo_blocks, + GFP_KERNEL); + if (!lzo_blocks[i]) { + kfree(sync_bmp); + ret = -ENOMEM; + goto err; + } + lzo_blocks[i]->sync_bmp = sync_bmp; + lzo_blocks[i]->sync_bmp_nbits = reg_size; + /* alloc the working space for the compressed block */ + ret = snd_soc_lzo_prepare(lzo_blocks[i]); + if (ret < 0) + goto err; + } + + blksize = snd_soc_lzo_get_blksize(codec); + p = codec_drv->reg_cache_default; + end = codec_drv->reg_cache_default + reg_size; + /* compress the register map and fill the lzo blocks */ + for (i = 0; i < blkcount; ++i, p += blksize) { + lzo_blocks[i]->src = p; + if (p + blksize > end) + lzo_blocks[i]->src_len = end - p; + else + lzo_blocks[i]->src_len = blksize; + ret = snd_soc_lzo_compress_cache_block(codec, + lzo_blocks[i]); + if (ret < 0) + goto err; + lzo_blocks[i]->decompressed_size = + lzo_blocks[i]->src_len; + } + + if (tofree) + kfree(codec_drv->reg_cache_default); + return 0; +err: + snd_soc_cache_exit(codec); +err_tofree: + if (tofree) + kfree(codec_drv->reg_cache_default); + return ret; +} + static int snd_soc_flat_cache_sync(struct snd_soc_codec *codec) { int i; @@ -879,6 +1284,14 @@ static const struct snd_soc_cache_ops cache_types[] = { .read = snd_soc_flat_cache_read, .write = snd_soc_flat_cache_write, .sync = snd_soc_flat_cache_sync + }, + { + .id = SND_SOC_LZO_COMPRESSION, + .init = snd_soc_lzo_cache_init, + .exit = snd_soc_lzo_cache_exit, + .read = snd_soc_lzo_cache_read, + .write = snd_soc_lzo_cache_write, + .sync = snd_soc_lzo_cache_sync } };
On Thu, Nov 04, 2010 at 02:22:43PM +0000, Dimitris Papastamos wrote:
This looks really good except for the handling of unsupported register sizes I mentioned for another patch and the fact that the LZO additions to Kconfig probably ought to go in here.
by using the LZO compression technique, one can get down to ~3-5kB. There might be a performance penalty associated with each individual read/write due to decompressing/compressing the underlying cache, however that should not
I strongly suspect that there will actually be a performance penalty associated with the (de-)compress :)
As we discussed previously this can be mitigated against in future by keeping the last accessed block of memory uncompressed so that we don't need to do the LZO operations so often when doing a sequence of accesses to the same area of the register map (this should work well during DAPM runs since the power bits tend to all be close together, for example). Timeouts or a different chunking algorithm could be used to reduce the memory cost of this, though we need to be careful we don't overengineer.
be noticeable. The memory benefits depend on whether the target architecture can get rid of the memory occupied by the original register defaults cache which is marked as __devinitconst.
Even if it doesn't manage to get rid of the original copy there will of course still be some memory gain as the runtime allocated register cache will be compressed - only the size of the win will be affected, there's no question that there will be some benefit unless the register map is pathologic and doesn't compress.
On Thu, 2010-11-04 at 18:45 -0400, Mark Brown wrote:
As we discussed previously this can be mitigated against in future by keeping the last accessed block of memory uncompressed so that we don't need to do the LZO operations so often when doing a sequence of accesses to the same area of the register map (this should work well during DAPM runs since the power bits tend to all be close together, for example). Timeouts or a different chunking algorithm could be used to reduce the memory cost of this, though we need to be careful we don't overengineer.
Yes, that's the idea. I was also thinking that during sync() we could have another flag like cache_bypass which would be set so that when we are writing out the cache to the hardware, we don't write back to the cache again.
Thanks, Dimitrios
On Fri, Nov 05, 2010 at 10:22:31AM +0000, Dimitris Papastamos wrote:
Yes that's the idea. I was also thinking, that during sync() we can have another flag like cache_bypass which would be set so when we are writing out the cache to the hardware, we don't write back to the cache again.
Yeah, though if we do the thing with keeping the last page uncompressed that'd get the overwhelming bulk of that anyway.
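Nothing in this series implements that yet; purely as a sketch of the idea being discussed (all names below are invented), the LZO backend could keep the most recently used block decompressed and only pay the LZO cost when a different block is touched:

/* Sketch only - keep one "hot" block decompressed between accesses. */
struct snd_soc_lzo_hot_block {
	int blkindex;		/* block currently held decompressed, -1 if none */
	void *buf;		/* decompressed contents of that block */
	bool dirty;		/* must be re-compressed before eviction */
};

static int snd_soc_lzo_touch_block(struct snd_soc_codec *codec,
				   struct snd_soc_lzo_hot_block *hot,
				   int blkindex)
{
	if (hot->blkindex == blkindex)
		return 0;	/* repeated access to the same area, no LZO work */

	if (hot->blkindex >= 0 && hot->dirty) {
		/* ... compress hot->buf back into lzo_blocks[hot->blkindex] ... */
	}

	/* ... decompress lzo_blocks[blkindex] into hot->buf ... */
	hot->blkindex = blkindex;
	hot->dirty = false;
	return 0;
}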
This patch adds support for rbtree compression when storing the register cache. It does this by not adding any uninitialized registers (those whose value is 0). If any of those registers is later written with a nonzero value, it gets added into the rbtree.
Consider a sample device with a large sparse register map. The register indices are between [0, 0x31ff]. An array of 12800 registers is thus created, each of which is 2 bytes. This results in a 25kB region. This array normally lives outside soc-core, typically in the driver itself. The original soc-core code would kmemdup this region, resulting in 50kB total memory. When using the rbtree compression technique and __devinitconst on the original array the figures are as follows. For this typical device you might have 100 initialized registers, that is, registers that are nonzero by default. We build an rbtree with 100 nodes, each of which is 21 bytes. This results in ~2kB of memory. Assuming that the target arch can free up the memory used by the initial __devinitconst array, we end up using about 2kB of actual memory. The memory footprint will increase as uninitialized registers get written and thus new nodes created in the rbtree. In practice, most of those registers are never changed. If the target arch can't free up the __devinitconst array, we end up using a total of ~27kB. The difference between the rbtree and the LZO caching techniques is that with the LZO technique the size of the cache will grow more slowly as more uninitialized registers get changed.
Signed-off-by: Dimitris Papastamos dp@opensource.wolfsonmicro.com
---
 include/sound/soc.h   |    3 +-
 sound/soc/soc-cache.c |  233 +++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 235 insertions(+), 1 deletions(-)
diff --git a/include/sound/soc.h b/include/sound/soc.h index 8135b3c..6e9f511 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -256,7 +256,8 @@ enum snd_soc_control_type {
enum snd_soc_compress_type { SND_SOC_NO_COMPRESSION, - SND_SOC_LZO_COMPRESSION + SND_SOC_LZO_COMPRESSION, + SND_SOC_RBTREE_COMPRESSION };
int snd_soc_register_platform(struct device *dev, diff --git a/sound/soc/soc-cache.c b/sound/soc/soc-cache.c index 63b7fe9..3d85ac0 100644 --- a/sound/soc/soc-cache.c +++ b/sound/soc/soc-cache.c @@ -16,6 +16,7 @@ #include <sound/soc.h> #include <linux/lzo.h> #include <linux/bitmap.h> +#include <linux/rbtree.h>
/* serialize access to *cache_read() and *cache_write() */ static DEFINE_MUTEX(cache_rw_mutex); @@ -759,6 +760,230 @@ int snd_soc_codec_set_cache_io(struct snd_soc_codec *codec, } EXPORT_SYMBOL_GPL(snd_soc_codec_set_cache_io);
+struct snd_soc_rbtree_node { + struct rb_node node; + unsigned int dirty:1; + unsigned int reg; + unsigned int value; +} __attribute__ ((packed)); + +struct snd_soc_rbtree_ctx { + struct rb_root root; +}; + +static struct snd_soc_rbtree_node *snd_soc_rbtree_lookup( + struct rb_root *root, unsigned int reg) +{ + struct rb_node *node; + struct snd_soc_rbtree_node *rbnode; + + node = root->rb_node; + while (node) { + rbnode = container_of(node, struct snd_soc_rbtree_node, node); + if (rbnode->reg < reg) + node = node->rb_left; + else if (rbnode->reg > reg) + node = node->rb_right; + else + return rbnode; + } + + return NULL; +} + + +static int snd_soc_rbtree_insert(struct rb_root *root, + struct snd_soc_rbtree_node *rbnode) +{ + struct rb_node **new, *parent; + struct snd_soc_rbtree_node *rbnode_tmp; + + parent = NULL; + new = &root->rb_node; + while (*new) { + rbnode_tmp = container_of(*new, struct snd_soc_rbtree_node, + node); + parent = *new; + if (rbnode_tmp->reg < rbnode->reg) + new = &((*new)->rb_left); + else if (rbnode_tmp->reg > rbnode->reg) + new = &((*new)->rb_right); + else + return 0; + } + + /* insert the node into the rbtree */ + rb_link_node(&rbnode->node, parent, new); + rb_insert_color(&rbnode->node, root); + + return 1; +} + +static int snd_soc_rbtree_cache_sync(struct snd_soc_codec *codec) +{ + struct snd_soc_rbtree_ctx *rbtree_ctx; + struct rb_node *node; + struct snd_soc_rbtree_node *rbnode; + unsigned int val; + + rbtree_ctx = codec->reg_cache; + for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) { + rbnode = rb_entry(node, struct snd_soc_rbtree_node, node); + if (!rbnode->dirty) + continue; + snd_soc_cache_read(codec, rbnode->reg, &val); + snd_soc_write(codec, rbnode->reg, val); + dev_dbg(codec->dev, "Synced register %#x, value = %#x\n", + rbnode->reg, val); + } + + return 0; +} + +static int snd_soc_rbtree_cache_write(struct snd_soc_codec *codec, + unsigned int reg, unsigned int value) +{ + struct snd_soc_rbtree_ctx *rbtree_ctx; + struct snd_soc_rbtree_node *rbnode; + + rbtree_ctx = codec->reg_cache; + rbnode = snd_soc_rbtree_lookup(&rbtree_ctx->root, reg); + if (rbnode) { + if (rbnode->value == value) + return 0; + rbnode->value = value; + } else { + /* bail out early, no need to create the rbnode yet */ + if (!value) + return 0; + /* + * for uninitialized registers whose value is changed + * from the default zero, create an rbnode and insert + * it into the tree. 
+ */ + rbnode = kzalloc(sizeof *rbnode, GFP_KERNEL); + if (!rbnode) + return -ENOMEM; + rbnode->reg = reg; + rbnode->value = value; + snd_soc_rbtree_insert(&rbtree_ctx->root, rbnode); + } + /* remember that we have to sync this register */ + rbnode->dirty = 1; + + return 0; +} + +static int snd_soc_rbtree_cache_read(struct snd_soc_codec *codec, + unsigned int reg, unsigned int *value) +{ + struct snd_soc_rbtree_ctx *rbtree_ctx; + struct snd_soc_rbtree_node *rbnode; + + rbtree_ctx = codec->reg_cache; + rbnode = snd_soc_rbtree_lookup(&rbtree_ctx->root, reg); + if (rbnode) { + *value = rbnode->value; + } else { + /* uninitialized registers default to 0 */ + *value = 0; + } + + return 0; +} + +static int snd_soc_rbtree_cache_exit(struct snd_soc_codec *codec) +{ + struct rb_node *next; + struct snd_soc_rbtree_ctx *rbtree_ctx; + struct snd_soc_rbtree_node *rbtree_node; + + /* if we've already been called then just return */ + rbtree_ctx = codec->reg_cache; + if (!rbtree_ctx) + return 0; + + /* free up the rbtree */ + next = rb_first(&rbtree_ctx->root); + while (next) { + rbtree_node = rb_entry(next, struct snd_soc_rbtree_node, node); + next = rb_next(&rbtree_node->node); + rb_erase(&rbtree_node->node, &rbtree_ctx->root); + kfree(rbtree_node); + } + + /* release the resources */ + kfree(codec->reg_cache); + codec->reg_cache = NULL; + + return 0; +} + +static int snd_soc_rbtree_cache_init(struct snd_soc_codec *codec) +{ + struct snd_soc_rbtree_ctx *rbtree_ctx; + + codec->reg_cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL); + if (!codec->reg_cache) + return -ENOMEM; + + rbtree_ctx = codec->reg_cache; + rbtree_ctx->root = RB_ROOT; + + if (!codec->driver->reg_cache_default) + return 0; + +/* + * populate the rbtree with the initialized registers. All other + * registers will be inserted into the tree when they are first written. + * + * The reasoning behind this, is that we need to step through and + * dereference the cache in u8/u16 increments without sacrificing + * portability. This could also be done using memcpy() but that would + * be slightly more cryptic. + */ +#define snd_soc_rbtree_populate(cache) \ +({ \ + int ret, i; \ + struct snd_soc_rbtree_node *rbtree_node; \ + \ + ret = 0; \ + cache = codec->driver->reg_cache_default; \ + for (i = 0; i < codec->driver->reg_cache_size; ++i) { \ + if (!cache[i]) \ + continue; \ + rbtree_node = kzalloc(sizeof *rbtree_node, GFP_KERNEL); \ + if (!rbtree_node) { \ + ret = -ENOMEM; \ + snd_soc_cache_exit(codec); \ + break; \ + } \ + rbtree_node->reg = i; \ + rbtree_node->value = cache[i]; \ + snd_soc_rbtree_insert(&rbtree_ctx->root, \ + rbtree_node); \ + } \ + ret; \ +}) + + switch (codec->driver->reg_word_size) { + case 1: { + const u8 *cache; + + return snd_soc_rbtree_populate(cache); + } + case 2: { + const u16 *cache; + + return snd_soc_rbtree_populate(cache); + } + default: + return -EINVAL; + } + + return 0; +} + struct snd_soc_lzo_ctx { void *wmem; void *dst; @@ -1292,6 +1517,14 @@ static const struct snd_soc_cache_ops cache_types[] = { .read = snd_soc_lzo_cache_read, .write = snd_soc_lzo_cache_write, .sync = snd_soc_lzo_cache_sync + }, + { + .id = SND_SOC_RBTREE_COMPRESSION, + .init = snd_soc_rbtree_cache_init, + .exit = snd_soc_rbtree_cache_exit, + .read = snd_soc_rbtree_cache_read, + .write = snd_soc_rbtree_cache_write, + .sync = snd_soc_rbtree_cache_sync } };
On Thu, Nov 04, 2010 at 02:22:44PM +0000, Dimitris Papastamos wrote:
This patch adds support for rbtree compression when storing the register cache. It does this by not adding any uninitialized registers (those whose value is 0). If any of those registers is written with a nonzero value they get added into the rbtree.
- rbtree_ctx = codec->reg_cache;
- for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
rbnode = rb_entry(node, struct snd_soc_rbtree_node, node);
if (!rbnode->dirty)
continue;
snd_soc_cache_read(codec, rbnode->reg, &val);
snd_soc_write(codec, rbnode->reg, val);
dev_dbg(codec->dev, "Synced register %#x, value = %#x\n",
rbnode->reg, val);
Hrm, dirty handling is kind of interesting. It is unconditionally set in the write function and never cleared so we'll always rewrite a register if it's ever been touched. Is it worth remembering the default values and just comparing with them? The memory overhead will probably be low since we only have one bitfield value here... (and remember that we'll be unlikely to allocate memory in 21 byte packed hunks with no overhead...).
On Thu, Nov 04, 2010 at 02:49:48PM -0400, Mark Brown wrote:
Hrm, dirty handling is kind of interesting. It is unconditionally set in the write function and never cleared so we'll always rewrite a register if it's ever been touched. Is it worth remembering the default values and just comparing with them, the memory overhead will probably be low since we only have one bitfield value here... (and remember that we'll be unlikely to allocate memory in 21 byte packed hunks with no overhead...).
BTW, this does look like a very nice win overall.
On Thu, 2010-11-04 at 14:49 -0400, Mark Brown wrote:
On Thu, Nov 04, 2010 at 02:22:44PM +0000, Dimitris Papastamos wrote:
This patch adds support for rbtree compression when storing the register cache. It does this by not adding any uninitialized registers (those whose value is 0). If any of those registers is written with a nonzero value they get added into the rbtree.
- rbtree_ctx = codec->reg_cache;
- for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
rbnode = rb_entry(node, struct snd_soc_rbtree_node, node);
if (!rbnode->dirty)
continue;
snd_soc_cache_read(codec, rbnode->reg, &val);
snd_soc_write(codec, rbnode->reg, val);
dev_dbg(codec->dev, "Synced register %#x, value = %#x\n",
rbnode->reg, val);
Hrm, dirty handling is kind of interesting. It is unconditionally set in the write function and never cleared so we'll always rewrite a register if it's ever been touched. Is it worth remembering the default values and just comparing with them, the memory overhead will probably be low since we only have one bitfield value here... (and remember that we'll be unlikely to allocate memory in 21 byte packed hunks with no overhead...).
Yes, I have deliberately not cleared those bits because at the moment I don't have the default value of the register saved anywhere. So what I will do is save the default value, and then only sync those registers whose value differs from their default value.
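A rough sketch of that follow-up, assuming the rbtree node grows a field for the default value (this is not what the posted patch does):

/* Sketch: remember the default per node and skip registers still at it. */
struct snd_soc_rbtree_node {
	struct rb_node node;
	unsigned int reg;
	unsigned int value;
	unsigned int defval;	/* assumed new field, taken from reg_cache_default */
} __attribute__ ((packed));

static int snd_soc_rbtree_cache_sync(struct snd_soc_codec *codec)
{
	struct snd_soc_rbtree_ctx *rbtree_ctx = codec->reg_cache;
	struct snd_soc_rbtree_node *rbnode;
	struct rb_node *node;

	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
		rbnode = rb_entry(node, struct snd_soc_rbtree_node, node);
		if (rbnode->value == rbnode->defval)
			continue;	/* never changed, or changed back to default */
		snd_soc_write(codec, rbnode->reg, rbnode->value);
		dev_dbg(codec->dev, "Synced register %#x, value = %#x\n",
			rbnode->reg, rbnode->value);
	}
	return 0;
}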
Thanks, Dimitrios