Message-ID: <db933c9e-bf7b-dec6-8022-75074f9cebf7@linux.intel.com>
Date: Wed, 23 Mar 2022 09:08:25 +0100
From: Amadeusz Sławiński
<amadeuszx.slawinski@...ux.intel.com>
To: Takashi Iwai <tiwai@...e.de>, alsa-devel@...a-project.org
Cc: Hu Jiahui <kirin.say@...il.com>, linux-kernel@...r.kernel.org
Subject: Re: [PATCH 3/4] ALSA: pcm: Fix races among concurrent prepare and
hw_params/hw_free calls
On 3/22/2022 6:07 PM, Takashi Iwai wrote:
> Like the previous fixes for the hw_params and hw_free ioctl races, we
> need to cover the races between concurrent prepare ioctl calls and
> hw_params/hw_free, too.
>
> This patch implements the locking with the existing
> runtime->buffer_mutex for prepare ioctls. Unlike the previous case
> for snd_pcm_hw_params() and snd_pcm_hw_free(), snd_pcm_prepare() is
> performed on the linked streams, hence the lock can't simply be
> applied at the top. To track the lock in each linked substream, we
> modify snd_pcm_action_group() slightly and apply the buffer_mutex
> there for the stream_lock=false case (where formerly no lock was
> applied).
>
> Cc: <stable@...r.kernel.org>
> Signed-off-by: Takashi Iwai <tiwai@...e.de>
> ---
> sound/core/pcm_native.c | 32 ++++++++++++++++++--------------
> 1 file changed, 18 insertions(+), 14 deletions(-)
>
> diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
> index 266895374b83..0e4fbf5fd87b 100644
> --- a/sound/core/pcm_native.c
> +++ b/sound/core/pcm_native.c
> @@ -1190,15 +1190,17 @@ struct action_ops {
> static int snd_pcm_action_group(const struct action_ops *ops,
> struct snd_pcm_substream *substream,
> snd_pcm_state_t state,
> - bool do_lock)
> + bool stream_lock)
> {
> struct snd_pcm_substream *s = NULL;
> struct snd_pcm_substream *s1;
> int res = 0, depth = 1;
>
> snd_pcm_group_for_each_entry(s, substream) {
> - if (do_lock && s != substream) {
> - if (s->pcm->nonatomic)
> + if (s != substream) {
> + if (!stream_lock)
> + mutex_lock_nested(&s->runtime->buffer_mutex, depth);
> + else if (s->pcm->nonatomic)
> mutex_lock_nested(&s->self_group.mutex, depth);
> else
> spin_lock_nested(&s->self_group.lock, depth);
Maybe use the existing group lock helper here instead:

	if (!stream_lock)
		mutex_lock_nested(&s->runtime->buffer_mutex, depth);
	else
		snd_pcm_group_lock(&s->self_group, s->pcm->nonatomic);

?
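
For reference, those group lock helpers are generated in pcm_native.c
roughly like this (quoting from memory, so the exact form may differ a
bit):

	#define DEFINE_PCM_GROUP_LOCK(action, mutex_action) \
	static void snd_pcm_group_ ## action(struct snd_pcm_group *group, \
					     bool nonatomic) \
	{ \
		if (nonatomic) \
			mutex_ ## mutex_action(&group->mutex); \
		else \
			spin_ ## action(&group->lock); \
	}

	DEFINE_PCM_GROUP_LOCK(lock, lock);
	DEFINE_PCM_GROUP_LOCK(unlock, unlock);

One caveat: snd_pcm_group_lock() takes the plain (non-nested) lock, so
the lockdep nesting annotation via "depth" would be lost; it may not be
a direct drop-in replacement.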
> @@ -1226,18 +1228,18 @@ static int snd_pcm_action_group(const struct action_ops *ops,
> ops->post_action(s, state);
> }
> _unlock:
> - if (do_lock) {
> - /* unlock streams */
> - snd_pcm_group_for_each_entry(s1, substream) {
> - if (s1 != substream) {
> - if (s1->pcm->nonatomic)
> - mutex_unlock(&s1->self_group.mutex);
> - else
> - spin_unlock(&s1->self_group.lock);
> - }
> - if (s1 == s) /* end */
> - break;
> + /* unlock streams */
> + snd_pcm_group_for_each_entry(s1, substream) {
> + if (s1 != substream) {
> + if (!stream_lock)
> + mutex_unlock(&s1->runtime->buffer_mutex);
> + else if (s1->pcm->nonatomic)
> + mutex_unlock(&s1->self_group.mutex);
> + else
> + spin_unlock(&s1->self_group.lock);
And, similarly to the locking above, use snd_pcm_group_unlock() here?
> }
> + if (s1 == s) /* end */
> + break;
> }
> return res;
> }
> @@ -1367,10 +1369,12 @@ static int snd_pcm_action_nonatomic(const struct action_ops *ops,
>
> /* Guarantee the group members won't change during non-atomic action */
> down_read(&snd_pcm_link_rwsem);
> + mutex_lock(&substream->runtime->buffer_mutex);
> if (snd_pcm_stream_linked(substream))
> res = snd_pcm_action_group(ops, substream, state, false);
> else
> res = snd_pcm_action_single(ops, substream, state);
> + mutex_unlock(&substream->runtime->buffer_mutex);
> up_read(&snd_pcm_link_rwsem);
> return res;
> }
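
FWIW, the resulting nesting for a prepare on linked streams looks
roughly like this, if I read the patch correctly:

	down_read(&snd_pcm_link_rwsem);
	mutex_lock(&substream->runtime->buffer_mutex);
	snd_pcm_group_for_each_entry(s, substream) {
		if (s != substream)
			mutex_lock_nested(&s->runtime->buffer_mutex, depth);
		/* ops->prepare() etc. run on each member here */
	}

i.e. the caller's buffer_mutex is taken first and each other linked
member's buffer_mutex is taken nested inside it, which should serialize
prepare against concurrent hw_params/hw_free on any of the linked
substreams.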