Message-Id: <20220510130731.083315630@linuxfoundation.org>
Date: Tue, 10 May 2022 15:08:11 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org, stable@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
Hu Jiahui <kirin.say@...il.com>,
Jaroslav Kysela <perex@...ex.cz>, Takashi Iwai <tiwai@...e.de>,
Ovidiu Panait <ovidiu.panait@...driver.com>
Subject: [PATCH 5.4 42/52] ALSA: pcm: Fix races among concurrent hw_params and hw_free calls
From: Takashi Iwai <tiwai@...e.de>
commit 92ee3c60ec9fe64404dc035e7c41277d74aa26cb upstream.
Currently we have neither a proper check nor protection against
concurrent calls of the PCM hw_params and hw_free ioctls, which may
result in a use-after-free (UAF). Since the existing PCM stream lock
can't be used to protect the whole ioctl operations, we need a new
mutex to protect those racy calls.

This patch introduces a new mutex, runtime->buffer_mutex, and applies
it to both the hw_params and hw_free ioctl code paths. Along with it,
both functions are slightly modified (the mmap_count check is moved
into the state-check block) for code simplicity.
Reported-by: Hu Jiahui <kirin.say@...il.com>
Cc: <stable@...r.kernel.org>
Reviewed-by: Jaroslav Kysela <perex@...ex.cz>
Link: https://lore.kernel.org/r/20220322170720.3529-2-tiwai@suse.de
Signed-off-by: Takashi Iwai <tiwai@...e.de>
[OP: backport to 5.4: adjusted context]
Signed-off-by: Ovidiu Panait <ovidiu.panait@...driver.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
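As a rough illustration of what the new buffer_mutex buys, here is a
minimal user-space sketch of the same locking shape; it is not kernel
code, and all names (fake_runtime, fake_hw_params, fake_hw_free,
worker) are hypothetical stand-ins. A single mutex held across the
whole allocate and free paths guarantees that one thread cannot free
the buffer while another is still replacing or sizing it, which is the
UAF window closed in snd_pcm_hw_params()/snd_pcm_hw_free() below.
Build with e.g. "gcc -pthread".

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_runtime {
	pthread_mutex_t buffer_mutex;	/* serializes buffer changes */
	char *dma_area;			/* the resource the ioctls race on */
	size_t bytes;
};

static int fake_hw_params(struct fake_runtime *rt, size_t bytes)
{
	int err = 0;

	pthread_mutex_lock(&rt->buffer_mutex);
	/* state checks would run here; failures jump to unlock */
	free(rt->dma_area);
	rt->dma_area = calloc(1, bytes);
	if (!rt->dma_area) {
		err = -1;
		goto unlock;
	}
	rt->bytes = bytes;
 unlock:
	pthread_mutex_unlock(&rt->buffer_mutex);
	return err;
}

static int fake_hw_free(struct fake_runtime *rt)
{
	pthread_mutex_lock(&rt->buffer_mutex);
	free(rt->dma_area);	/* no fake_hw_params() can run concurrently */
	rt->dma_area = NULL;
	rt->bytes = 0;
	pthread_mutex_unlock(&rt->buffer_mutex);
	return 0;
}

static void *worker(void *arg)
{
	struct fake_runtime *rt = arg;
	int i;

	for (i = 0; i < 100000; i++) {
		fake_hw_params(rt, 4096);
		fake_hw_free(rt);
	}
	return NULL;
}

int main(void)
{
	struct fake_runtime rt = { .buffer_mutex = PTHREAD_MUTEX_INITIALIZER };
	pthread_t a, b;

	pthread_create(&a, NULL, worker, &rt);
	pthread_create(&b, NULL, worker, &rt);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	free(rt.dma_area);
	printf("finished without use-after-free\n");
	return 0;
}

The same shape is visible in the real patch below: take buffer_mutex
first, record any error, and funnel every exit through an unlock label
instead of returning early.
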
include/sound/pcm.h | 1 +
sound/core/pcm.c | 2 ++
sound/core/pcm_native.c | 55 +++++++++++++++++++++++++++++++-----------------
3 files changed, 39 insertions(+), 19 deletions(-)
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -395,6 +395,7 @@ struct snd_pcm_runtime {
wait_queue_head_t sleep; /* poll sleep */
wait_queue_head_t tsleep; /* transfer sleep */
struct fasync_struct *fasync;
+ struct mutex buffer_mutex; /* protect for buffer changes */
/* -- private section -- */
void *private_data;
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -969,6 +969,7 @@ int snd_pcm_attach_substream(struct snd_
init_waitqueue_head(&runtime->tsleep);
runtime->status->state = SNDRV_PCM_STATE_OPEN;
+ mutex_init(&runtime->buffer_mutex);
substream->runtime = runtime;
substream->private_data = pcm->private_data;
@@ -1000,6 +1001,7 @@ void snd_pcm_detach_substream(struct snd
substream->runtime = NULL;
if (substream->timer)
spin_unlock_irq(&substream->timer->lock);
+ mutex_destroy(&runtime->buffer_mutex);
kfree(runtime);
put_pid(substream->pid);
substream->pid = NULL;
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -630,33 +630,40 @@ static int snd_pcm_hw_params_choose(stru
return 0;
}
+#if IS_ENABLED(CONFIG_SND_PCM_OSS)
+#define is_oss_stream(substream) ((substream)->oss.oss)
+#else
+#define is_oss_stream(substream) false
+#endif
+
static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_pcm_runtime *runtime;
- int err, usecs;
+ int err = 0, usecs;
unsigned int bits;
snd_pcm_uframes_t frames;
if (PCM_RUNTIME_CHECK(substream))
return -ENXIO;
runtime = substream->runtime;
+ mutex_lock(&runtime->buffer_mutex);
snd_pcm_stream_lock_irq(substream);
switch (runtime->status->state) {
case SNDRV_PCM_STATE_OPEN:
case SNDRV_PCM_STATE_SETUP:
case SNDRV_PCM_STATE_PREPARED:
+ if (!is_oss_stream(substream) &&
+ atomic_read(&substream->mmap_count))
+ err = -EBADFD;
break;
default:
- snd_pcm_stream_unlock_irq(substream);
- return -EBADFD;
+ err = -EBADFD;
+ break;
}
snd_pcm_stream_unlock_irq(substream);
-#if IS_ENABLED(CONFIG_SND_PCM_OSS)
- if (!substream->oss.oss)
-#endif
- if (atomic_read(&substream->mmap_count))
- return -EBADFD;
+ if (err)
+ goto unlock;
params->rmask = ~0U;
err = snd_pcm_hw_refine(substream, params);
@@ -733,14 +740,19 @@ static int snd_pcm_hw_params(struct snd_
if ((usecs = period_to_usecs(runtime)) >= 0)
pm_qos_add_request(&substream->latency_pm_qos_req,
PM_QOS_CPU_DMA_LATENCY, usecs);
- return 0;
+ err = 0;
_error:
- /* hardware might be unusable from this time,
- so we force application to retry to set
- the correct hardware parameter settings */
- snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
- if (substream->ops->hw_free != NULL)
- substream->ops->hw_free(substream);
+ if (err) {
+ /* hardware might be unusable from this time,
+ * so we force application to retry to set
+ * the correct hardware parameter settings
+ */
+ snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
+ if (substream->ops->hw_free != NULL)
+ substream->ops->hw_free(substream);
+ }
+ unlock:
+ mutex_unlock(&runtime->buffer_mutex);
return err;
}
@@ -773,22 +785,27 @@ static int snd_pcm_hw_free(struct snd_pc
if (PCM_RUNTIME_CHECK(substream))
return -ENXIO;
runtime = substream->runtime;
+ mutex_lock(&runtime->buffer_mutex);
snd_pcm_stream_lock_irq(substream);
switch (runtime->status->state) {
case SNDRV_PCM_STATE_SETUP:
case SNDRV_PCM_STATE_PREPARED:
+ if (atomic_read(&substream->mmap_count))
+ result = -EBADFD;
break;
default:
- snd_pcm_stream_unlock_irq(substream);
- return -EBADFD;
+ result = -EBADFD;
+ break;
}
snd_pcm_stream_unlock_irq(substream);
- if (atomic_read(&substream->mmap_count))
- return -EBADFD;
+ if (result)
+ goto unlock;
if (substream->ops->hw_free)
result = substream->ops->hw_free(substream);
snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
pm_qos_remove_request(&substream->latency_pm_qos_req);
+ unlock:
+ mutex_unlock(&runtime->buffer_mutex);
return result;
}
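
The sound/core/pcm.c hunk above ties the mutex lifetime to that of the
runtime object: mutex_init() when the runtime is attached,
mutex_destroy() immediately before kfree(). Below is a minimal
user-space sketch of the same pairing, again with hypothetical names
(make_runtime/destroy_runtime), not the real attach/detach code.

#include <pthread.h>
#include <stdlib.h>

struct fake_runtime {
	pthread_mutex_t buffer_mutex;
	char *dma_area;
};

/* rough counterpart of snd_pcm_attach_substream(): allocate, then init */
static struct fake_runtime *make_runtime(void)
{
	struct fake_runtime *rt = calloc(1, sizeof(*rt));

	if (rt)
		pthread_mutex_init(&rt->buffer_mutex, NULL);
	return rt;
}

/* rough counterpart of snd_pcm_detach_substream(): destroy, then free */
static void destroy_runtime(struct fake_runtime *rt)
{
	if (!rt)
		return;
	free(rt->dma_area);
	pthread_mutex_destroy(&rt->buffer_mutex);
	free(rt);
}

int main(void)
{
	struct fake_runtime *rt = make_runtime();

	destroy_runtime(rt);
	return 0;
}

As in the real detach path, the mutex is destroyed only once nothing
else can reach the runtime, just before the memory itself is released.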