[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <1657087331-32455-2-git-send-email-quic_clew@quicinc.com>
Date: Tue, 5 Jul 2022 23:02:08 -0700
From: Chris Lew <quic_clew@...cinc.com>
To: <agross@...nel.org>, <bjorn.andersson@...aro.org>,
<konrad.dybcio@...ainline.org>
CC: <linux-arm-msm@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
<quic_clew@...cinc.com>
Subject: [PATCH 1/4] soc: qcom: smp2p: Introduce pending state for virtual irq
If an smp2p change occurs while a virtual interrupt is disabled, smp2p
should be able to resend that interrupt on enablement.
This functionality requires CONFIG_HARDIRQS_SW_RESEND to be enabled
to reschedule the interrupts. To ensure the mask and unmask functions
are called during enable and disable, set the flag to disable lazy
IRQ state handling (IRQ_DISABLE_UNLAZY).
Signed-off-by: Chris Lew <quic_clew@...cinc.com>
---
drivers/soc/qcom/smp2p.c | 18 +++++++++++++-----
1 file changed, 13 insertions(+), 5 deletions(-)
diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
index 59dbf4b61e6c..1c3259fe98be 100644
--- a/drivers/soc/qcom/smp2p.c
+++ b/drivers/soc/qcom/smp2p.c
@@ -101,6 +101,7 @@ struct smp2p_entry {
struct irq_domain *domain;
DECLARE_BITMAP(irq_enabled, 32);
+ DECLARE_BITMAP(irq_pending, 32);
DECLARE_BITMAP(irq_rising, 32);
DECLARE_BITMAP(irq_falling, 32);
@@ -146,6 +147,7 @@ struct qcom_smp2p {
unsigned local_pid;
unsigned remote_pid;
+ int irq;
struct regmap *ipc_regmap;
int ipc_offset;
int ipc_bit;
@@ -217,8 +219,8 @@ static void qcom_smp2p_notify_in(struct qcom_smp2p *smp2p)
{
struct smp2p_smem_item *in;
struct smp2p_entry *entry;
+ unsigned long status;
int irq_pin;
- u32 status;
char buf[SMP2P_MAX_ENTRY_NAME];
u32 val;
int i;
@@ -247,19 +249,22 @@ static void qcom_smp2p_notify_in(struct qcom_smp2p *smp2p)
status = val ^ entry->last_value;
entry->last_value = val;
+ status |= *entry->irq_pending;
/* No changes of this entry? */
if (!status)
continue;
- for_each_set_bit(i, entry->irq_enabled, 32) {
- if (!(status & BIT(i)))
- continue;
-
+ for_each_set_bit(i, &status, 32) {
if ((val & BIT(i) && test_bit(i, entry->irq_rising)) ||
(!(val & BIT(i)) && test_bit(i, entry->irq_falling))) {
irq_pin = irq_find_mapping(entry->domain, i);
handle_nested_irq(irq_pin);
+
+ if (test_bit(i, entry->irq_enabled))
+ clear_bit(i, entry->irq_pending);
+ else
+ set_bit(i, entry->irq_pending);
}
}
}
@@ -365,6 +370,8 @@ static int smp2p_irq_map(struct irq_domain *d,
irq_set_chip_data(irq, entry);
irq_set_nested_thread(irq, 1);
irq_set_noprobe(irq);
+ irq_set_parent(irq, entry->smp2p->irq);
+ irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
return 0;
}
@@ -609,6 +616,7 @@ static int qcom_smp2p_probe(struct platform_device *pdev)
/* Kick the outgoing edge after allocating entries */
qcom_smp2p_kick(smp2p);
+ smp2p->irq = irq;
ret = devm_request_threaded_irq(&pdev->dev, irq,
NULL, qcom_smp2p_intr,
IRQF_ONESHOT,
--
2.7.4
Powered by blists - more mailing lists