[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <DM5PR12MB1276642553DDD5AF85B65E01DAD30@DM5PR12MB1276.namprd12.prod.outlook.com>
Date: Thu, 23 Apr 2020 09:28:28 +0000
From: Gustavo Pimentel <Gustavo.Pimentel@...opsys.com>
To: Alan Mikhak <alan.mikhak@...ive.com>,
"dmaengine@...r.kernel.org" <dmaengine@...r.kernel.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"linux-pci@...r.kernel.org" <linux-pci@...r.kernel.org>,
"dan.j.williams@...el.com" <dan.j.williams@...el.com>,
"vkoul@...nel.org" <vkoul@...nel.org>,
"kishon@...com" <kishon@...com>, "maz@...nel.org" <maz@...nel.org>,
"paul.walmsley@...ive.com" <paul.walmsley@...ive.com>
Subject: RE: [PATCH v2][next] dmaengine: dw-edma: Check MSI descriptor before
copying
On Thu, Apr 23, 2020 at 2:58:21, Alan Mikhak <alan.mikhak@...ive.com>
wrote:
> From: Alan Mikhak <alan.mikhak@...ive.com>
>
> Modify dw_edma_irq_request() to check if a struct msi_desc entry exists
> before copying the contents of its struct msi_msg pointer.
>
> Without this sanity check, __get_cached_msi_msg() crashes when invoked by
> dw_edma_irq_request() running on a Linux-based PCIe endpoint device. MSI
> interrupts are not received by PCIe endpoint devices. If irq_get_msi_desc()
> returns null, then there is no cached struct msi_msg to be copied.
>
> This patch depends on the following patch:
> [PATCH v2] dmaengine: dw-edma: Decouple dw-edma-core.c from struct pci_dev
> https://urldefense.com/v3/__https://patchwork.kernel.org/patch/11491757/__;!!A4F2R9G_pg!L_vf_Tml7Ca4sWVvZp5crRCp7YsMj6B93G9cMAO8Dj3w9I0MArjwuwNKtDz9rr0RlpXiqPg$
>
> Rebased on linux-next which has above patch applied.
>
> Fixes: Build error with config x86_64-randconfig-f003-20200422
> Fixes: Build error with config s390-allmodconfig
> Reported-by: kbuild test robot <lkp@...el.com>
> Signed-off-by: Alan Mikhak <alan.mikhak@...ive.com>
> ---
> drivers/dma/dw-edma/dw-edma-core.c | 17 ++++++++++-------
> 1 file changed, 10 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
> index db401eb11322..306ab50462be 100644
> --- a/drivers/dma/dw-edma/dw-edma-core.c
> +++ b/drivers/dma/dw-edma/dw-edma-core.c
> @@ -13,6 +13,7 @@
> #include <linux/dmaengine.h>
> #include <linux/err.h>
> #include <linux/interrupt.h>
> +#include <linux/irq.h>
> #include <linux/dma/edma.h>
> #include <linux/dma-mapping.h>
>
> @@ -773,6 +774,7 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip,
> u32 rd_mask = 1;
> int i, err = 0;
> u32 ch_cnt;
> + int irq;
>
> ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;
>
> @@ -781,16 +783,16 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip,
>
> if (dw->nr_irqs == 1) {
> /* Common IRQ shared among all channels */
> - err = request_irq(dw->ops->irq_vector(dev, 0),
> - dw_edma_interrupt_common,
> + irq = dw->ops->irq_vector(dev, 0);
> + err = request_irq(irq, dw_edma_interrupt_common,
> IRQF_SHARED, dw->name, &dw->irq[0]);
> if (err) {
> dw->nr_irqs = 0;
> return err;
> }
>
> - get_cached_msi_msg(dw->ops->irq_vector(dev, 0),
> - &dw->irq[0].msi);
> + if (irq_get_msi_desc(irq))
> + get_cached_msi_msg(irq, &dw->irq[0].msi);
> } else {
> /* Distribute IRQs equally among all channels */
> int tmp = dw->nr_irqs;
> @@ -804,7 +806,8 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip,
> dw_edma_add_irq_mask(&rd_mask, *rd_alloc, dw->rd_ch_cnt);
>
> for (i = 0; i < (*wr_alloc + *rd_alloc); i++) {
> - err = request_irq(dw->ops->irq_vector(dev, i),
> + irq = dw->ops->irq_vector(dev, i);
> + err = request_irq(irq,
> i < *wr_alloc ?
> dw_edma_interrupt_write :
> dw_edma_interrupt_read,
> @@ -815,8 +818,8 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip,
> return err;
> }
>
> - get_cached_msi_msg(dw->ops->irq_vector(dev, i),
> - &dw->irq[i].msi);
> + if (irq_get_msi_desc(irq))
> + get_cached_msi_msg(irq, &dw->irq[i].msi);
> }
>
> dw->nr_irqs = i;
> --
> 2.7.4
Acked-by: Gustavo Pimentel <gustavo.pimentel@...opsys.com>
Powered by blists - more mailing lists