[alsa-devel] [PATCH v2] ALSA: hda/ca0132 - Update latency based on DSP state.
The DSP in the CA0132 codec adds a variable latency to audio depending on what processing is being done. Add a new patch op to return that latency for capture and playback streams. The latency is determined by which blocks are enabled and knowing how much latency is added by each block.
Signed-off-by: Dylan Reid &lt;dgreid@chromium.org&gt; --- Changes since v1: Rebase on tiwai's patch to add a pcm_op (this patch depends on it). Get the ms-to-frames conversion right. Tested by running loopback and comparing measured vs. reported latency.
sound/pci/hda/patch_ca0132.c | 55 ++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 53 insertions(+), 2 deletions(-)
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c index 12eb21a..841842c 100644 --- a/sound/pci/hda/patch_ca0132.c +++ b/sound/pci/hda/patch_ca0132.c @@ -131,6 +131,13 @@ enum { /* Effects values size*/ #define EFFECT_VALS_MAX_COUNT 12
+/* Latency introduced by DSP blocks in milliseconds. */ +#define DSP_CAPTURE_INIT_LATENCY 0 +#define DSP_CRYSTAL_VOICE_LATENCY 124 +#define DSP_PLAYBACK_INIT_LATENCY 13 +#define DSP_PLAY_ENHANCEMENT_LATENCY 30 +#define DSP_SPEAKER_OUT_LATENCY 7 + struct ct_effect { char name[44]; hda_nid_t nid; @@ -2743,6 +2750,31 @@ static int ca0132_playback_pcm_cleanup(struct hda_pcm_stream *hinfo, return 0; }
+static unsigned int ca0132_playback_pcm_delay(struct hda_pcm_stream *info, + struct hda_codec *codec, + struct snd_pcm_substream *substream) +{ + struct ca0132_spec *spec = codec->spec; + unsigned int latency = DSP_PLAYBACK_INIT_LATENCY; + struct snd_pcm_runtime *runtime = substream->runtime; + + if (spec->dsp_state != DSP_DOWNLOADED) + return 0; + + /* Add latency if playback enhancement and either effect is enabled. */ + if (spec->effects_switch[PLAY_ENHANCEMENT - EFFECT_START_NID]) { + if ((spec->effects_switch[SURROUND - EFFECT_START_NID]) || + (spec->effects_switch[DIALOG_PLUS - EFFECT_START_NID])) + latency += DSP_PLAY_ENHANCEMENT_LATENCY; + } + + /* Applying Speaker EQ adds latency as well. */ + if (spec->cur_out_type == SPEAKER_OUT) + latency += DSP_SPEAKER_OUT_LATENCY; + + return (latency * runtime->rate) / 1000; +} + /* * Digital out */ @@ -2811,6 +2843,23 @@ static int ca0132_capture_pcm_cleanup(struct hda_pcm_stream *hinfo, return 0; }
+static unsigned int ca0132_capture_pcm_delay(struct hda_pcm_stream *info, + struct hda_codec *codec, + struct snd_pcm_substream *substream) +{ + struct ca0132_spec *spec = codec->spec; + unsigned int latency = DSP_CAPTURE_INIT_LATENCY; + struct snd_pcm_runtime *runtime = substream->runtime; + + if (spec->dsp_state != DSP_DOWNLOADED) + return 0; + + if (spec->effects_switch[CRYSTAL_VOICE - EFFECT_START_NID]) + latency += DSP_CRYSTAL_VOICE_LATENCY; + + return (latency * runtime->rate) / 1000; +} + /* * Controls stuffs. */ @@ -4002,7 +4051,8 @@ static struct hda_pcm_stream ca0132_pcm_analog_playback = { .channels_max = 6, .ops = { .prepare = ca0132_playback_pcm_prepare, - .cleanup = ca0132_playback_pcm_cleanup + .cleanup = ca0132_playback_pcm_cleanup, + .get_delay = ca0132_playback_pcm_delay, }, };
@@ -4012,7 +4062,8 @@ static struct hda_pcm_stream ca0132_pcm_analog_capture = { .channels_max = 2, .ops = { .prepare = ca0132_capture_pcm_prepare, - .cleanup = ca0132_capture_pcm_cleanup + .cleanup = ca0132_capture_pcm_cleanup, + .get_delay = ca0132_capture_pcm_delay, }, };
At Thu, 4 Apr 2013 13:55:09 -0700, Dylan Reid wrote:
The DSP in the CA0132 codec adds a variable latency to audio depending on what processing is being done. Add a new patch op to return that latency for capture and playback streams. The latency is determined by which blocks are enabled and knowing how much latency is added by each block.
Signed-off-by: Dylan Reid dgreid@chromium.org
Thanks, applied.
Takashi
Changes since v1: Rebase on tiwai's patch to add a pcm_op, this is dependent on that. Get ms to frames conversion right. Test loopback and compare measured vs reported latency.
sound/pci/hda/patch_ca0132.c | 55 ++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 53 insertions(+), 2 deletions(-)
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c index 12eb21a..841842c 100644 --- a/sound/pci/hda/patch_ca0132.c +++ b/sound/pci/hda/patch_ca0132.c @@ -131,6 +131,13 @@ enum { /* Effects values size*/ #define EFFECT_VALS_MAX_COUNT 12
+/* Latency introduced by DSP blocks in milliseconds. */ +#define DSP_CAPTURE_INIT_LATENCY 0 +#define DSP_CRYSTAL_VOICE_LATENCY 124 +#define DSP_PLAYBACK_INIT_LATENCY 13 +#define DSP_PLAY_ENHANCEMENT_LATENCY 30 +#define DSP_SPEAKER_OUT_LATENCY 7
struct ct_effect { char name[44]; hda_nid_t nid; @@ -2743,6 +2750,31 @@ static int ca0132_playback_pcm_cleanup(struct hda_pcm_stream *hinfo, return 0; }
+static unsigned int ca0132_playback_pcm_delay(struct hda_pcm_stream *info,
struct hda_codec *codec,
struct snd_pcm_substream *substream)
+{
- struct ca0132_spec *spec = codec->spec;
- unsigned int latency = DSP_PLAYBACK_INIT_LATENCY;
- struct snd_pcm_runtime *runtime = substream->runtime;
- if (spec->dsp_state != DSP_DOWNLOADED)
return 0;
- /* Add latency if playback enhancement and either effect is enabled. */
- if (spec->effects_switch[PLAY_ENHANCEMENT - EFFECT_START_NID]) {
if ((spec->effects_switch[SURROUND - EFFECT_START_NID]) ||
(spec->effects_switch[DIALOG_PLUS - EFFECT_START_NID]))
latency += DSP_PLAY_ENHANCEMENT_LATENCY;
- }
- /* Applying Speaker EQ adds latency as well. */
- if (spec->cur_out_type == SPEAKER_OUT)
latency += DSP_SPEAKER_OUT_LATENCY;
- return (latency * runtime->rate) / 1000;
+}
/*
 * Digital out
 */ @@ -2811,6 +2843,23 @@ static int ca0132_capture_pcm_cleanup(struct hda_pcm_stream *hinfo, return 0; }
+static unsigned int ca0132_capture_pcm_delay(struct hda_pcm_stream *info,
struct hda_codec *codec,
struct snd_pcm_substream *substream)
+{
- struct ca0132_spec *spec = codec->spec;
- unsigned int latency = DSP_CAPTURE_INIT_LATENCY;
- struct snd_pcm_runtime *runtime = substream->runtime;
- if (spec->dsp_state != DSP_DOWNLOADED)
return 0;
- if (spec->effects_switch[CRYSTAL_VOICE - EFFECT_START_NID])
latency += DSP_CRYSTAL_VOICE_LATENCY;
- return (latency * runtime->rate) / 1000;
+}
/*
 * Controls stuffs.
 */ @@ -4002,7 +4051,8 @@ static struct hda_pcm_stream ca0132_pcm_analog_playback = { .channels_max = 6, .ops = { .prepare = ca0132_playback_pcm_prepare,
-		.cleanup = ca0132_playback_pcm_cleanup
+		.cleanup = ca0132_playback_pcm_cleanup,
+		.get_delay = ca0132_playback_pcm_delay,
	},
};
@@ -4012,7 +4062,8 @@ static struct hda_pcm_stream ca0132_pcm_analog_capture = { .channels_max = 2, .ops = { .prepare = ca0132_capture_pcm_prepare,
-		.cleanup = ca0132_capture_pcm_cleanup
+		.cleanup = ca0132_capture_pcm_cleanup,
+		.get_delay = ca0132_capture_pcm_delay,
	},
};
-- 1.8.1.5
The DSP in the CA0132 codec adds a variable latency to audio depending on what processing is being done. Add a new patch op to return that latency for capture and playback streams. The latency is determined by which blocks are enabled and knowing how much latency is added by each block.
Signed-off-by: Dylan Reid dgreid@chromium.org
Thanks, applied.
This conflicts with the audio timestamp stuff I contributed last year. If the hardware supports a WALLCLK, and HDA does, the timestamp is just a read of the counter plus a translation to ns. If it doesn't, the timestamp corresponds to the delay. If you modify the definition of the delay then the timestamp will be off on some platforms. -Pierre
At Fri, 05 Apr 2013 10:01:04 -0500, Pierre-Louis Bossart wrote:
The DSP in the CA0132 codec adds a variable latency to audio depending on what processing is being done. Add a new patch op to return that latency for capture and playback streams. The latency is determined by which blocks are enabled and knowing how much latency is added by each block.
Signed-off-by: Dylan Reid dgreid@chromium.org
Thanks, applied.
This conflicts with the audio timestamp stuff I contributed last year. If the hardware supports a WALLCLK, and HDA does, the timestamp is just a read of the counter plus a translation to ns. If it doesn't, the timestamp corresponds to the delay. If you modify the definition of the delay then the timestamp will be off on some platforms.
I thought that the timestamp and the runtime delay are basically independent parameters. Why must the former be disabled?
It'd be easy to disable the wallclock, as a patch below. But I don't understand the reason...
thanks,
Takashi
--- diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 31754d2..14d285a 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -2002,6 +2002,8 @@ static int azx_pcm_open(struct snd_pcm_substream *substream) until we figure out how to handle digital inputs */ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_WALL_CLOCK; + else if (hinfo->ops.get_delay) + runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_WALL_CLOCK;
spin_lock_irqsave(&chip->reg_lock, flags); azx_dev->substream = substream;
I thought that the timestamp and the runtime delay are basically independent parameters. Why must the former be disabled?
In HDA you have DPIB and LPIB. The position in the buffer is reported by DPIB and the # of samples rendered is reported by LPIB. The delay is the difference between the two when you use the LPIB_DELAY implementation. As a result, when you look-up the timestamps and query the delay, what you get is the current time as seen by the HDA interface. If you increment the runtime delay to add the codec delay, then you must also increment by the same amount in azx_get_wallclock_tstamp(). Just updating the runtime delay is not enough. One problem I also have with this approach is that the delay may be different if you have multiple streams processed with different algorithms, and that isn't exposed here. -Pierre
On Fri, Apr 5, 2013 at 9:28 AM, Pierre-Louis Bossart pierre-louis.bossart@linux.intel.com wrote:
I thought that the timestamp and the runtime delay are basically independent parameters. Why must the former be disabled?
In HDA you have DPIB and LPIB. The position in the buffer is reported by DPIB and the # of samples rendered is reported by LPIB. The delay is the difference between the two when you use the LPIB_DELAY implementation. As a result, when you look-up the timestamps and query the delay, what you get is the current time as seen by the HDA interface. If you increment the runtime delay to add the codec delay, then you must also increment by the same amount in azx_get_wallclock_tstamp(). Just updating the runtime delay is not enough.
Thanks for the explanation Pierre, but I'm still a little confused.
Does the timestamp represent the time the last sample was put in the buffer or the time the last sample was rendered to the bus?
Is the "current time" meant to be the timestamp that the next sample sent will be rendered? (timestamp + delay)
One problem I also have with this approach is that the delay may be different if you have multiple streams processed with different algorithms, and that isn't exposed here. -Pierre
Thanks for the explanation Pierre, but I'm still a little confused.
Does the timestamp represent the time the last sample was put in the buffer or the time the last sample was rendered to the bus?
Is the "current time" meant to be the timestamp that the next sample sent will be rendered? (timestamp + delay)
When you query with snd_pcm_status(), you get all sort of information, including 2 timestamps. The 'regular' timestamp is just the system time (tsc) and the 'audio' timestamp is the time as reported by the audio hardware. This can be inferred by reading sample counters (# of samples pushed on the serial link) or wallclock counters, the wallclock is essentially identical but gives you a higher degree of precision. The audio time really needs to be taken at the last possible stage, not when you write into a buffer. The two timestamps can be used to find out what the drift is between the two time references (eg in PulseAudio) In other words, if you add the codec delay, it's fine but it needs to be accounted for in a symmetrical manner, otherwise the audio timestamp and the delay will be off by a fixed amount. Correcting the wallclock value and substracting the codec delay in azx_get_wallclk() would realign accounting methods. -Pierre
On Fri, Apr 5, 2013 at 10:20 AM, Pierre-Louis Bossart pierre-louis.bossart@linux.intel.com wrote:
Thanks for the explanation Pierre, but I'm still a little confused.
Does the timestamp represent the time the last sample was put in the buffer or the time the last sample was rendered to the bus?
Is the "current time" meant to be the timestamp that the next sample sent will be rendered? (timestamp + delay)
When you query with snd_pcm_status(), you get all sort of information, including 2 timestamps. The 'regular' timestamp is just the system time (tsc) and the 'audio' timestamp is the time as reported by the audio hardware. This can be inferred by reading sample counters (# of samples pushed on the serial link) or wallclock counters, the wallclock is essentially identical but gives you a higher degree of precision. The audio time really needs to be taken at the last possible stage, not when you write into a buffer. The two timestamps can be used to find out what the drift is between the two time references (eg in PulseAudio)
I think I'm starting to understand this. To go just a little bit deeper:
Is the 'audio' timestamp the one calculated in snd_pcm_update_hw_ptr0, based on either the hardware or the delay (INFO_HAS_WALL_CLOCK)?
In other words, if you add the codec delay, it's fine but it needs to be accounted for in a symmetrical manner, otherwise the audio timestamp and the delay will be off by a fixed amount. Correcting the wallclock value and substracting the codec delay in azx_get_wallclk() would realign accounting methods.
Would this problem manifest on systems that don't support wall clock? Because the time based on delay will contain the codec delay?
There are two distinct use cases:
Wanting to know how the audio clock is drifting with respect to the system clock. (don't care about codec delay) Wanting to know when a sample will be rendered for AEC or A/V sync. (codec delay critical)
Can we keep them from affecting one another? This seems possible if the timestamp and delay are reported separately.
-Pierre
Is the 'audio' timestamp the one calculated in snd_pcm_update_hw_ptr0, based on either the hardware or the delay (INFO_HAS_WALL_CLOCK)?
Yes, correct.
In other words, if you add the codec delay, it's fine but it needs to be accounted for in a symmetrical manner, otherwise the audio timestamp and the delay will be off by a fixed amount. Correcting the wallclock value and substracting the codec delay in azx_get_wallclk() would realign accounting methods.
Would this problem manifest on systems that don't support wall clock? Because the time based on delay will contain the codec delay?
If wall clock is supported, audio timestamp doesn't account for codec delay If Wall clock is not suported, audio timestamp does account for codec delay with your patches.
There are two distinct use cases:
Wanting to know how the audio clock is drifting with respect to the system clock. (don't care about codec delay) Wanting to know when a sample will be rendered for AEC or A/V sync. (codec delay critical)
Can we keep them from affecting one another? This seems possible if the timestamp and delay are reported separately.
My suggestion is that the audio timestamp always accounts for codec delays, which will allow you to track drift and track rendering time. Btw for capture wallclocks are disabled for now since we don't really know how to use them for digital inputs, and we really need to have a consistent behavior for capture and playback. -Pierre
On Fri, Apr 5, 2013 at 2:24 PM, Pierre-Louis Bossart pierre-louis.bossart@linux.intel.com wrote:
Is the 'audio' timestamp the one calculated in snd_pcm_update_hw_ptr0, based on either the hardware or the delay (INFO_HAS_WALL_CLOCK)?
Yes, correct.
In other words, if you add the codec delay, it's fine but it needs to be accounted for in a symmetrical manner, otherwise the audio timestamp and the delay will be off by a fixed amount. Correcting the wallclock value and substracting the codec delay in azx_get_wallclk() would realign accounting methods.
Would this problem manifest on systems that don't support wall clock? Because the time based on delay will contain the codec delay?
If wall clock is supported, audio timestamp doesn't account for codec delay If Wall clock is not suported, audio timestamp does account for codec delay with your patches.
Good, I get it, thanks for the education!
There are two distinct use cases:
Wanting to know how the audio clock is drifting with respect to the system clock. (don't care about codec delay) Wanting to know when a sample will be rendered for AEC or A/V sync. (codec delay critical)
Can we keep them from affecting one another? This seems possible if the timestamp and delay are reported separately.
My suggestion is that the audio timestamp always accounts for codec delays, which will allow you to track drift and track rendering time. Btw for capture wallclocks are disabled for now since we don't really know how to use them for digital inputs, and we really need to have a consistent behavior for capture and playback.
Sounds good, I'll take a look at adding the delay to the wallclock time as well.
-Pierre
participants (3)
-
Dylan Reid
-
Pierre-Louis Bossart
-
Takashi Iwai