[alsa-devel] [Question about DPCM] dpcm_run_new_update races against xrun
Qiao Zhou
zhouqiao at marvell.com
Tue Nov 4 07:43:21 CET 2014
>-----Original Message-----
>From: Liam Girdwood [mailto:liam.r.girdwood at linux.intel.com]
>Sent: November 4, 2014 1:20
>To: Takashi Iwai
>Cc: Qiao Zhou; Mark Brown; perex at perex.cz; alsa-devel at alsa-project.org;
>Wilbur Wang; Chao Xie
>Subject: Re: [Question about DPCM] dpcm_run_new_update races against xrun
>
>On Mon, 2014-11-03 at 17:54 +0100, Takashi Iwai wrote:
>> At Mon, 03 Nov 2014 15:42:16 +0100,
>> Takashi Iwai wrote:
>> >
>> > At Mon, 03 Nov 2014 13:32:04 +0000,
>> > Liam Girdwood wrote:
>> > >
>> > > On Mon, 2014-11-03 at 03:28 -0800, Qiao Zhou wrote:
>> > > > Hi Mark, Liam
>> > > >
>> > > > I hit an issue where a BE stops functioning while developing a
>> > > > DPCM feature, and found that dpcm_run_new_update() is not well
>> > > > protected against XRUN (interrupt context).
>> > > > Could you give some suggestions?
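>> > > > Roughly, the interleaving looks like this (my reconstruction of
>> > > > the race, not verified line by line):
>> > > >
>> > > > CPU0: dpcm_run_new_update()         CPU1: XRUN (irq context)
>> > > >   runtime_update = UPDATE_BE
>> > > >   dpcm_run_update_startup(...)      dpcm_fe_dai_trigger(STOP)
>> > > >   /* still starting BEs */            runtime_update = UPDATE_FE
>> > > >                                       /* triggers FE + BEs */
>> > > >                                       runtime_update = UPDATE_NO
>> > > >   runtime_update = UPDATE_NO
>> > > >
>> > > > Both sides write runtime_update without a lock, so one clobbers
>> > > > the other and the BE is left in a wrong state.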
>> > > >
>> > >
>> > > I'm wondering if this would be better solved by improving the
>> > > locking so that an XRUN could not run at the same time as the
>> > > runtime update. Both functions are async, but are only protected
>> > > by a mutex atm (like the rest of the PCM ops except trigger, which
>> > > is atomic). We may need to add a spinlock to both runtime_update()
>> > > and dpcm_fe_dai_trigger().
>> >
>> > Be careful: you cannot take a spinlock during hw_params, for example.
>> >
>> > Why do you need to set runtime_update to NO in the trigger callback
>> > in the first place? The trigger may influence the FE streaming
>> > state, but not the FE/BE connection state, right?
>>
>> Thinking about this again, that doesn't look like a good suggestion,
>> either.
>>
>> As mentioned, we can't hold a lock across a whole lengthy operation
>> like prepare or hw_params. So, instead of taking a lock, the trigger
>> should be delayed while such an operation is being performed. (At first
>> I thought of performing only the FE's trigger immediately and delaying
>> the BE part, but the FE/BE trigger can be more complex than that, so it
>> ends up as a fully delayed trigger.)
>>
>> The patch below is a quick hack (only compile-tested!). It uses the
>> PCM stream lock to protect the state transition against the race, and
>> the trigger callback checks the state and aborts immediately if there
>> is a concurrent operation. On exit from the state change, it invokes
>> the delayed trigger.
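>>
>> In pseudo-code, the idea is roughly the following (just a sketch of
>> the pattern; the actual patch below is what counts):
>>
>> 	/* trigger side, under the PCM stream lock */
>> 	if (runtime_update != SND_SOC_DPCM_UPDATE_NO) {
>> 		/* an update is in flight; remember cmd + 1 so that
>> 		 * 0 can mean "nothing pending" */
>> 		trigger_pending = cmd + 1;
>> 		return 0;
>> 	}
>>
>> 	/* update side, when runtime_update goes back to UPDATE_NO,
>> 	 * still under the same stream lock */
>> 	if (trigger_pending) {
>> 		dpcm_fe_dai_do_trigger(substream, trigger_pending - 1);
>> 		trigger_pending = 0;
>> 	}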
>>
>> Let me know if this really works as imagined. Then I'll cook up a
>> proper patch.
>>
>
>I'll give the patch a try tomorrow, and it would be good for Marvell to
>test it too (as my system does not XRUN).
>
>Thanks
>
>Liam
>
>>
>> Takashi
>>
>> ---
>> diff --git a/include/sound/soc-dpcm.h b/include/sound/soc-dpcm.h
>> index 2883a7a6f9f3..98f2ade0266e 100644
>> --- a/include/sound/soc-dpcm.h
>> +++ b/include/sound/soc-dpcm.h
>> @@ -102,6 +102,8 @@ struct snd_soc_dpcm_runtime {
>> /* state and update */
>> enum snd_soc_dpcm_update runtime_update;
>> enum snd_soc_dpcm_state state;
>> +
>> + int trigger_pending; /* trigger cmd + 1 if pending, 0 if not */
>> };
>>
>> /* can this BE stop and free */
>> diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
>> index 002311afdeaa..fbd570c55a67 100644
>> --- a/sound/soc/soc-pcm.c
>> +++ b/sound/soc/soc-pcm.c
>> @@ -1522,13 +1522,36 @@ static void dpcm_set_fe_runtime(struct snd_pcm_substream *substream)
>> dpcm_init_runtime_hw(runtime, &cpu_dai_drv->capture);
>> }
>>
>> +static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd);
>> +
>> +/* Set FE's runtime_update state; the state is protected via PCM stream lock
>> + * for avoiding the race with trigger callback.
>> + * If the state is unset and a trigger is pending while the previous operation,
>> + * process the pending trigger action here.
>> + */
>> +static void dpcm_set_fe_update_state(struct snd_soc_pcm_runtime *fe,
>> + int stream, enum snd_soc_dpcm_update state)
>> +{
>> + struct snd_pcm_substream *substream =
>> + snd_soc_dpcm_get_substream(fe, stream);
>> +
>> + snd_pcm_stream_lock_irq(substream);
>> + fe->dpcm[stream].runtime_update = state;
>> + if (state == SND_SOC_DPCM_UPDATE_NO && fe->dpcm[stream].trigger_pending) {
>> + dpcm_fe_dai_do_trigger(substream,
>> + fe->dpcm[stream].trigger_pending - 1);
>> + fe->dpcm[stream].trigger_pending = 0;
>> + }
>> + snd_pcm_stream_unlock_irq(substream);
>> +}
>> +
>> static int dpcm_fe_dai_startup(struct snd_pcm_substream *fe_substream)
>> {
>> struct snd_soc_pcm_runtime *fe = fe_substream->private_data;
>> struct snd_pcm_runtime *runtime = fe_substream->runtime;
>> int stream = fe_substream->stream, ret = 0;
>>
>> - fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
>> + dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);
>>
>> ret = dpcm_be_dai_startup(fe, fe_substream->stream);
>> if (ret < 0) {
>> @@ -1550,13 +1573,13 @@ static int dpcm_fe_dai_startup(struct snd_pcm_substream *fe_substream)
>> dpcm_set_fe_runtime(fe_substream);
>> snd_pcm_limit_hw_rates(runtime);
>>
>> - fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
>> + dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
>> return 0;
>>
>> unwind:
>> dpcm_be_dai_startup_unwind(fe, fe_substream->stream);
>> be_err:
>> - fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
>> + dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
>> return ret;
>> }
>>
>> @@ -1603,7 +1626,7 @@ static int dpcm_fe_dai_shutdown(struct snd_pcm_substream *substream)
>> struct snd_soc_pcm_runtime *fe = substream->private_data;
>> int stream = substream->stream;
>>
>> - fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
>> + dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);
>>
>> /* shutdown the BEs */
>> dpcm_be_dai_shutdown(fe, substream->stream);
>> @@ -1617,7 +1640,7 @@ static int dpcm_fe_dai_shutdown(struct snd_pcm_substream *substream)
>> dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_STOP);
>>
>> fe->dpcm[stream].state = SND_SOC_DPCM_STATE_CLOSE;
>> - fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
>> + dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
>> return 0;
>> }
>>
>> @@ -1665,7 +1688,7 @@ static int dpcm_fe_dai_hw_free(struct snd_pcm_substream *substream)
>> int err, stream = substream->stream;
>>
>> mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
>> - fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
>> + dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);
>>
>> dev_dbg(fe->dev, "ASoC: hw_free FE %s\n", fe->dai_link->name);
>>
>> @@ -1680,7 +1703,7 @@ static int dpcm_fe_dai_hw_free(struct snd_pcm_substream *substream)
>> err = dpcm_be_dai_hw_free(fe, stream);
>>
>> fe->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_FREE;
>> - fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
>> + dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
>>
>> mutex_unlock(&fe->card->mutex);
>> return 0;
>> @@ -1773,7 +1796,7 @@ static int dpcm_fe_dai_hw_params(struct snd_pcm_substream *substream,
>> int ret, stream = substream->stream;
>>
>> mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
>> - fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
>> + dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);
>>
>> memcpy(&fe->dpcm[substream->stream].hw_params, params,
>> sizeof(struct snd_pcm_hw_params));
>> @@ -1796,7 +1819,7 @@ static int dpcm_fe_dai_hw_params(struct snd_pcm_substream *substream,
>> fe->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_PARAMS;
>>
>> out:
>> - fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
>> + dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
>> mutex_unlock(&fe->card->mutex);
>> return ret;
>> }
>> @@ -1910,14 +1933,12 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream,
>> }
>> EXPORT_SYMBOL_GPL(dpcm_be_dai_trigger);
>>
>> -static int dpcm_fe_dai_trigger(struct snd_pcm_substream *substream, int cmd)
>> +static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd)
>> {
>> struct snd_soc_pcm_runtime *fe = substream->private_data;
>> int stream = substream->stream, ret;
>> enum snd_soc_dpcm_trigger trigger = fe->dai_link->trigger[stream];
>>
>> - fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
>> -
>> switch (trigger) {
>> case SND_SOC_DPCM_TRIGGER_PRE:
>> /* call trigger on the frontend before the backend. */
>> @@ -1928,7 +1949,7 @@ static int dpcm_fe_dai_trigger(struct snd_pcm_substream *substream, int cmd)
>> ret = soc_pcm_trigger(substream, cmd);
>> if (ret < 0) {
>> dev_err(fe->dev,"ASoC: trigger FE failed %d\n", ret);
>> - goto out;
>> + return ret;
>> }
>>
>> ret = dpcm_be_dai_trigger(fe, substream->stream, cmd);
>> @@ -1939,7 +1960,7 @@ static int dpcm_fe_dai_trigger(struct snd_pcm_substream *substream, int cmd)
>> ret = dpcm_be_dai_trigger(fe, substream->stream, cmd);
>> if (ret < 0) {
>> dev_err(fe->dev,"ASoC: trigger FE failed %d\n", ret);
>> - goto out;
>> + return ret;
>> }
>>
>> dev_dbg(fe->dev, "ASoC: post trigger FE %s cmd %d\n",
>> @@ -1956,14 +1977,13 @@ static int dpcm_fe_dai_trigger(struct snd_pcm_substream *substream, int cmd)
>> ret = soc_pcm_bespoke_trigger(substream, cmd);
>> if (ret < 0) {
>> dev_err(fe->dev,"ASoC: trigger FE failed %d\n", ret);
>> - goto out;
>> + return ret;
>> }
>> break;
>> default:
>> dev_err(fe->dev, "ASoC: invalid trigger cmd %d for %s\n", cmd,
>> fe->dai_link->name);
>> - ret = -EINVAL;
>> - goto out;
>> + return -EINVAL;
>> }
>>
>> switch (cmd) {
>> @@ -1979,7 +1999,25 @@ static int dpcm_fe_dai_trigger(struct snd_pcm_substream *substream, int cmd)
>> break;
>> }
>>
>> -out:
>> + return 0;
>> +}
>> +
>> +static int dpcm_fe_dai_trigger(struct snd_pcm_substream *substream, int cmd)
>> +{
>> + struct snd_soc_pcm_runtime *fe = substream->private_data;
>> + int stream = substream->stream, ret;
>> +
>> + /* if FE's runtime_update is already set, we're in race;
>> + * process this trigger later at exit
>> + */
>> + if (fe->dpcm[stream].runtime_update != SND_SOC_DPCM_UPDATE_NO) {
>> + fe->dpcm[stream].trigger_pending = cmd + 1;
>> + return 0; /* delayed, assuming it's successful */
>> + }
>> +
>> + /* we're alone, let's trigger */
>> + fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
>> + ret = dpcm_fe_dai_do_trigger(substream, cmd);
>> fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
>> return ret;
>> }
>> @@ -2027,7 +2065,7 @@ static int dpcm_fe_dai_prepare(struct snd_pcm_substream *substream)
>>
>> dev_dbg(fe->dev, "ASoC: prepare FE %s\n", fe->dai_link->name);
>>
>> - fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
>> + dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);
>>
>> /* there is no point preparing this FE if there are no BEs */
>> if (list_empty(&fe->dpcm[stream].be_clients)) {
>> @@ -2054,7 +2092,7 @@ static int dpcm_fe_dai_prepare(struct snd_pcm_substream *substream)
>> fe->dpcm[stream].state = SND_SOC_DPCM_STATE_PREPARE;
>>
>> out:
>> - fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
>> + dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
>> mutex_unlock(&fe->card->mutex);
>>
>> return ret;
>> @@ -2201,11 +2239,11 @@ static int dpcm_run_new_update(struct snd_soc_pcm_runtime *fe, int stream)
>> {
>> int ret;
>>
>> - fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_BE;
>> + dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_BE);
>> ret = dpcm_run_update_startup(fe, stream);
>> if (ret < 0)
>> dev_err(fe->dev, "ASoC: failed to startup some BEs\n");
>> - fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
>> + dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
>>
>> return ret;
>> }
>> @@ -2214,11 +2252,11 @@ static int dpcm_run_old_update(struct snd_soc_pcm_runtime *fe, int stream)
>> {
>> int ret;
>>
>> - fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_BE;
>> + dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_BE);
>> ret = dpcm_run_update_shutdown(fe, stream);
>> if (ret < 0)
>> dev_err(fe->dev, "ASoC: failed to shutdown some BEs\n");
>> - fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
>> + dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
>>
>> return ret;
>> }
>
Takashi, Liam,

We have verified again, and this patch fixes the race issue. Thanks a
lot for your patch and suggestions.

Best Regards,
Qiao