Message-ID: <20251017085307.4325-12-srinivas.kandagatla@oss.qualcomm.com>
Date: Fri, 17 Oct 2025 09:53:06 +0100
From: Srinivas Kandagatla <srinivas.kandagatla@....qualcomm.com>
To: broonie@...nel.org
Cc: perex@...ex.cz, tiwai@...e.com, srini@...nel.org, alexey.klimov@...aro.org,
linux-sound@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-arm-msm@...r.kernel.org,
Srinivas Kandagatla <srinivas.kandagatla@....qualcomm.com>
Subject: [PATCH 11/12] ASoC: qcom: q6asm-dai: Use guard() for spin locks
Clean up the code by using guard() for spin locks instead of explicit
spin_lock_irqsave()/spin_unlock_irqrestore() pairs.
No functional changes, just cleanup.
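For reference, a minimal sketch of the pattern (the lock and function
below are hypothetical and only illustrate the scope-based construct
from <linux/cleanup.h>/<linux/spinlock.h>, they are not part of this
driver):

	static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock */

	static void example(void)
	{
		/*
		 * Lock is taken here and released automatically on every
		 * exit path from the scope, so no explicit unlock and no
		 * local "flags" variable are needed for the _irqsave form.
		 */
		guard(spinlock_irqsave)(&example_lock);

		/* critical section */
	}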
Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@....qualcomm.com>
---
sound/soc/qcom/qdsp6/q6asm-dai.c | 23 +++++------------------
1 file changed, 5 insertions(+), 18 deletions(-)
diff --git a/sound/soc/qcom/qdsp6/q6asm-dai.c b/sound/soc/qcom/qdsp6/q6asm-dai.c
index b616ce316d2f..665a5d1ec4cf 100644
--- a/sound/soc/qcom/qdsp6/q6asm-dai.c
+++ b/sound/soc/qcom/qdsp6/q6asm-dai.c
@@ -493,15 +493,15 @@ static void compress_event_handler(uint32_t opcode, uint32_t token,
{
struct q6asm_dai_rtd *prtd = priv;
struct snd_compr_stream *substream = prtd->cstream;
- unsigned long flags;
u32 wflags = 0;
uint64_t avail;
uint32_t bytes_written, bytes_to_write;
bool is_last_buffer = false;
+ guard(spinlock_irqsave)(&prtd->lock);
+
switch (opcode) {
case ASM_CLIENT_EVENT_CMD_RUN_DONE:
- spin_lock_irqsave(&prtd->lock, flags);
if (!prtd->bytes_sent) {
q6asm_stream_remove_initial_silence(prtd->audio_client,
prtd->stream_id,
@@ -512,11 +512,9 @@ static void compress_event_handler(uint32_t opcode, uint32_t token,
prtd->bytes_sent += prtd->pcm_count;
}
- spin_unlock_irqrestore(&prtd->lock, flags);
break;
case ASM_CLIENT_EVENT_CMD_EOS_DONE:
- spin_lock_irqsave(&prtd->lock, flags);
if (prtd->notify_on_drain) {
if (substream->partial_drain) {
/*
@@ -539,20 +537,16 @@ static void compress_event_handler(uint32_t opcode, uint32_t token,
} else {
prtd->state = Q6ASM_STREAM_STOPPED;
}
- spin_unlock_irqrestore(&prtd->lock, flags);
break;
case ASM_CLIENT_EVENT_DATA_WRITE_DONE:
- spin_lock_irqsave(&prtd->lock, flags);
bytes_written = token >> ASM_WRITE_TOKEN_LEN_SHIFT;
prtd->copied_total += bytes_written;
snd_compr_fragment_elapsed(substream);
- if (prtd->state != Q6ASM_STREAM_RUNNING) {
- spin_unlock_irqrestore(&prtd->lock, flags);
+ if (prtd->state != Q6ASM_STREAM_RUNNING)
break;
- }
avail = prtd->bytes_received - prtd->bytes_sent;
if (avail > prtd->pcm_count) {
@@ -581,7 +575,6 @@ static void compress_event_handler(uint32_t opcode, uint32_t token,
q6asm_cmd_nowait(prtd->audio_client,
prtd->stream_id, CMD_EOS);
- spin_unlock_irqrestore(&prtd->lock, flags);
break;
default:
@@ -1031,17 +1024,14 @@ static int q6asm_dai_compr_pointer(struct snd_soc_component *component,
{
struct snd_compr_runtime *runtime = stream->runtime;
struct q6asm_dai_rtd *prtd = runtime->private_data;
- unsigned long flags;
uint64_t temp_copied_total;
- spin_lock_irqsave(&prtd->lock, flags);
+ guard(spinlock_irqsave)(&prtd->lock);
tstamp->copied_total = prtd->copied_total;
temp_copied_total = tstamp->copied_total;
tstamp->byte_offset = do_div(temp_copied_total, prtd->pcm_size);
- spin_unlock_irqrestore(&prtd->lock, flags);
-
return 0;
}
@@ -1051,7 +1041,6 @@ static int q6asm_compr_copy(struct snd_soc_component *component,
{
struct snd_compr_runtime *runtime = stream->runtime;
struct q6asm_dai_rtd *prtd = runtime->private_data;
- unsigned long flags;
u32 wflags = 0;
uint64_t avail, bytes_in_flight = 0;
void *dstn;
@@ -1087,7 +1076,7 @@ static int q6asm_compr_copy(struct snd_soc_component *component,
return -EFAULT;
}
- spin_lock_irqsave(&prtd->lock, flags);
+ guard(spinlock_irqsave)(&prtd->lock);
bytes_in_flight = prtd->bytes_received - prtd->copied_total;
@@ -1113,8 +1102,6 @@ static int q6asm_compr_copy(struct snd_soc_component *component,
prtd->bytes_sent += bytes_to_write;
}
- spin_unlock_irqrestore(&prtd->lock, flags);
-
return count;
}
--
2.51.0