Message-ID: <20190122190048.GE114153@art_vandelay>
Date: Tue, 22 Jan 2019 14:00:48 -0500
From: Sean Paul <sean@...rly.run>
To: Stephen Boyd <swboyd@...omium.org>
Cc: Rob Clark <robdclark@...il.com>, linux-kernel@...r.kernel.org,
linux-arm-msm@...r.kernel.org,
linux-arm-kernel@...ts.infradead.org,
dri-devel@...ts.freedesktop.org, freedreno@...ts.freedesktop.org,
Sean Paul <seanpaul@...omium.org>,
Jordan Crouse <jcrouse@...eaurora.org>,
Jayant Shekhar <jshekhar@...eaurora.org>,
Rajesh Yadav <ryadav@...eaurora.org>,
Jeykumar Sankaran <jsanka@...eaurora.org>
Subject: Re: [PATCH] drm/msm/dpu: Convert to a chained irq chip
On Thu, Jan 03, 2019 at 11:06:02AM -0800, Stephen Boyd wrote:
> Devices that make up the DPU, i.e. the graphics card, request their
> interrupts from this "virtual" interrupt chip. The interrupt chip is
> built upon a GIC SPI interrupt that is raised when any of the
> interrupts in the DPU's irq status register are triggered. From the
> kernel's perspective this is a chained irq chip, so requesting the GIC
> SPI irq with request_irq() and then calling generic IRQ handling code
> from that handler is not completely proper. It's better to convert
> this to a chained irq so that the GIC SPI irq doesn't appear in
> /proc/interrupts, can't have its CPU affinity changed, and won't be
> accounted for with irq stats. Doing this also lets us specify a
> different lock class for the chained interrupts, silencing a recursive
> lockdep warning that is easy to trigger with 'threadirqs' on the
> kernel command line.
>
> WARNING: inconsistent lock state
> 4.19.10 #76 Tainted: G W
> --------------------------------
> inconsistent {IN-HARDIRQ-W} -> {HARDIRQ-ON-W} usage.
> irq/40-dpu_mdss/203 [HC0[0]:SC0[2]:HE1:SE0] takes:
> 0000000053ea9021 (&irq_desc_lock_class){?.-.}, at: handle_level_irq+0x34/0x26c
> {IN-HARDIRQ-W} state was registered at:
>   lock_acquire+0x244/0x360
>   _raw_spin_lock+0x64/0xa0
>   handle_fasteoi_irq+0x54/0x2ec
>   generic_handle_irq+0x44/0x5c
>   __handle_domain_irq+0x9c/0x11c
>   gic_handle_irq+0x208/0x260
>   el1_irq+0xb4/0x130
>   arch_cpu_idle+0x178/0x3cc
>   default_idle_call+0x3c/0x54
>   do_idle+0x1a8/0x3dc
>   cpu_startup_entry+0x24/0x28
>   rest_init+0x240/0x270
>   start_kernel+0x5a8/0x6bc
> irq event stamp: 18
> hardirqs last enabled at (17): [<ffffff9042385e80>] _raw_spin_unlock_irq+0x40/0xc0
> hardirqs last disabled at (16): [<ffffff904237a1f4>] __schedule+0x20c/0x1bbc
> softirqs last enabled at (0): [<ffffff9040f318d0>] copy_process+0xb50/0x3964
> softirqs last disabled at (18): [<ffffff9041036364>] local_bh_disable+0x8/0x20
>
> other info that might help us debug this:
>  Possible unsafe locking scenario:
> 
>        CPU0
>        ----
>   lock(&irq_desc_lock_class);
>   <Interrupt>
>     lock(&irq_desc_lock_class);
> 
>  *** DEADLOCK ***
>
> no locks held by irq/40-dpu_mdss/203.
>
> stack backtrace:
> CPU: 0 PID: 203 Comm: irq/40-dpu_mdss Tainted: G W 4.19.10 #76
> Call trace:
>  dump_backtrace+0x0/0x2f8
>  show_stack+0x20/0x2c
>  __dump_stack+0x20/0x28
>  dump_stack+0xcc/0x10c
>  mark_lock+0xbe0/0xe24
>  __lock_acquire+0x4cc/0x2708
>  lock_acquire+0x244/0x360
>  _raw_spin_lock+0x64/0xa0
>  handle_level_irq+0x34/0x26c
>  generic_handle_irq+0x44/0x5c
>  dpu_mdss_irq+0x64/0xec
>  irq_forced_thread_fn+0x58/0x9c
>  irq_thread+0x120/0x1dc
>  kthread+0x248/0x260
>  ret_from_fork+0x10/0x18
> ------------[ cut here ]------------
> irq 169 handler irq_default_primary_handler+0x0/0x18 enabled interrupts
>
> Cc: Sean Paul <seanpaul@...omium.org>
> Cc: Jordan Crouse <jcrouse@...eaurora.org>
> Cc: Jayant Shekhar <jshekhar@...eaurora.org>
> Cc: Rajesh Yadav <ryadav@...eaurora.org>
> Cc: Jeykumar Sankaran <jsanka@...eaurora.org>
> Signed-off-by: Stephen Boyd <swboyd@...omium.org>
LGTM, applied to dpu-staging.
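
For anyone following along, the interesting part of the conversion is who
owns the parent interrupt. A rough before/after sketch, not the driver code
verbatim (the irq/pdev locals are as in the patch; note that dpu_mdss_irq()
changes signature from an irqreturn_t isr to a void flow handler):

	/*
	 * Before: the parent GIC SPI is a normal, accounted irq with its
	 * own primary handler (and a forced thread when booted with
	 * 'threadirqs'):
	 */
	ret = request_irq(platform_get_irq(pdev, 0), dpu_mdss_irq, 0,
			  "dpu_mdss_isr", dpu_mdss);

	/*
	 * After: the irq core owns the parent irq and calls the flow
	 * handler directly from the GIC demux; the irq no longer shows up
	 * in /proc/interrupts, gets no stats, and can't have its affinity
	 * changed from userspace:
	 */
	irq = platform_get_irq(pdev, 0);
	irq_set_chained_handler_and_data(irq, dpu_mdss_irq, dpu_mdss);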
Thanks,
Sean
> ---
>
> drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c | 36 ++++++++++++++----------
> 1 file changed, 21 insertions(+), 15 deletions(-)
>
> diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
> index cb307a2abf06..7316b4ab1b85 100644
> --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
> +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
> @@ -23,11 +23,14 @@ struct dpu_mdss {
>  	struct dpu_irq_controller irq_controller;
>  };
>  
> -static irqreturn_t dpu_mdss_irq(int irq, void *arg)
> +static void dpu_mdss_irq(struct irq_desc *desc)
>  {
> -	struct dpu_mdss *dpu_mdss = arg;
> +	struct dpu_mdss *dpu_mdss = irq_desc_get_handler_data(desc);
> +	struct irq_chip *chip = irq_desc_get_chip(desc);
>  	u32 interrupts;
>  
> +	chained_irq_enter(chip, desc);
> +
>  	interrupts = readl_relaxed(dpu_mdss->mmio + HW_INTR_STATUS);
>  
>  	while (interrupts) {
> @@ -39,20 +42,20 @@ static irqreturn_t dpu_mdss_irq(int irq, void *arg)
>  					   hwirq);
>  		if (mapping == 0) {
>  			DRM_ERROR("couldn't find irq mapping for %lu\n", hwirq);
> -			return IRQ_NONE;
> +			break;
>  		}
>  
>  		rc = generic_handle_irq(mapping);
>  		if (rc < 0) {
>  			DRM_ERROR("handle irq fail: irq=%lu mapping=%u rc=%d\n",
>  				  hwirq, mapping, rc);
> -			return IRQ_NONE;
> +			break;
>  		}
>  
>  		interrupts &= ~(1 << hwirq);
>  	}
>  
> -	return IRQ_HANDLED;
> +	chained_irq_exit(chip, desc);
>  }
>  
>  static void dpu_mdss_irq_mask(struct irq_data *irqd)
> @@ -83,16 +86,16 @@ static struct irq_chip dpu_mdss_irq_chip = {
>  	.irq_unmask = dpu_mdss_irq_unmask,
>  };
>  
> +static struct lock_class_key dpu_mdss_lock_key, dpu_mdss_request_key;
> +
>  static int dpu_mdss_irqdomain_map(struct irq_domain *domain,
>  		unsigned int irq, irq_hw_number_t hwirq)
>  {
>  	struct dpu_mdss *dpu_mdss = domain->host_data;
> -	int ret;
>  
> +	irq_set_lockdep_class(irq, &dpu_mdss_lock_key, &dpu_mdss_request_key);
>  	irq_set_chip_and_handler(irq, &dpu_mdss_irq_chip, handle_level_irq);
> -	ret = irq_set_chip_data(irq, dpu_mdss);
> -
> -	return ret;
> +	return irq_set_chip_data(irq, dpu_mdss);
>  }
>  
>  static const struct irq_domain_ops dpu_mdss_irqdomain_ops = {
> @@ -159,11 +162,13 @@ static void dpu_mdss_destroy(struct drm_device *dev)
>  	struct msm_drm_private *priv = dev->dev_private;
>  	struct dpu_mdss *dpu_mdss = to_dpu_mdss(priv->mdss);
>  	struct dss_module_power *mp = &dpu_mdss->mp;
> +	int irq;
>  
>  	pm_runtime_suspend(dev->dev);
>  	pm_runtime_disable(dev->dev);
>  	_dpu_mdss_irq_domain_fini(dpu_mdss);
> -	free_irq(platform_get_irq(pdev, 0), dpu_mdss);
> +	irq = platform_get_irq(pdev, 0);
> +	irq_set_chained_handler_and_data(irq, NULL, NULL);
>  	msm_dss_put_clk(mp->clk_config, mp->num_clk);
>  	devm_kfree(&pdev->dev, mp->clk_config);
>  
> @@ -187,6 +192,7 @@ int dpu_mdss_init(struct drm_device *dev)
>  	struct dpu_mdss *dpu_mdss;
>  	struct dss_module_power *mp;
>  	int ret = 0;
> +	int irq;
>  
>  	dpu_mdss = devm_kzalloc(dev->dev, sizeof(*dpu_mdss), GFP_KERNEL);
>  	if (!dpu_mdss)
> @@ -219,12 +225,12 @@ int dpu_mdss_init(struct drm_device *dev)
>  	if (ret)
>  		goto irq_domain_error;
>  
> -	ret = request_irq(platform_get_irq(pdev, 0),
> -			  dpu_mdss_irq, 0, "dpu_mdss_isr", dpu_mdss);
> -	if (ret) {
> -		DPU_ERROR("failed to init irq: %d\n", ret);
> +	irq = platform_get_irq(pdev, 0);
> +	if (irq < 0)
>  		goto irq_error;
> -	}
> +
> +	irq_set_chained_handler_and_data(irq, dpu_mdss_irq,
> +					 dpu_mdss);
>  
>  	pm_runtime_enable(dev->dev);
>  
> --
> Sent by a computer through tubes
>
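
One closing note for the archives on the pair of static lock_class_keys
added above: irq_set_lockdep_class() gives every virq mapped in the DPU
domain its own lockdep class for both the irq_desc spinlock and the
request mutex, so taking a child descriptor's lock from inside the
parent's handler no longer looks to lockdep like irq_desc_lock_class
being taken recursively. A minimal sketch of the idiom, mirroring the
hunk above:

	static struct lock_class_key dpu_mdss_lock_key, dpu_mdss_request_key;

	static int dpu_mdss_irqdomain_map(struct irq_domain *domain,
			unsigned int irq, irq_hw_number_t hwirq)
	{
		/* Distinct classes for desc->lock and the request mutex */
		irq_set_lockdep_class(irq, &dpu_mdss_lock_key,
				      &dpu_mdss_request_key);
		irq_set_chip_and_handler(irq, &dpu_mdss_irq_chip,
					 handle_level_irq);
		return irq_set_chip_data(irq, domain->host_data);
	}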
--
Sean Paul, Software Engineer, Google / Chromium OS