[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <kdwwyx4digujlculbu4ivyiedqbmivuuddpfadpp7tup2gmvjv@6j53b4wqxbd7>
Date: Mon, 9 Dec 2024 10:34:14 +0100
From: Krzysztof Kozlowski <krzk@...nel.org>
To: Chen Wang <unicornxw@...il.com>
Cc: u.kleine-koenig@...libre.com, aou@...s.berkeley.edu, arnd@...db.de,
unicorn_wang@...look.com, conor+dt@...nel.org, guoren@...nel.org, inochiama@...look.com,
krzk+dt@...nel.org, palmer@...belt.com, paul.walmsley@...ive.com, robh@...nel.org,
tglx@...utronix.de, devicetree@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-riscv@...ts.infradead.org, chao.wei@...hgo.com, xiaoguang.xing@...hgo.com,
fengchun.li@...hgo.com
Subject: Re: [PATCH v2 2/3] irqchip: Add the Sophgo SG2042 MSI interrupt
controller
On Mon, Dec 09, 2024 at 03:12:00PM +0800, Chen Wang wrote:
> +static void sg2042_msi_irq_ack(struct irq_data *d)
> +{
> + struct sg2042_msi_data *data = irq_data_get_irq_chip_data(d);
> + int bit_off = d->hwirq - data->irq_first;
> +
> + writel(1 << bit_off, (unsigned int *)data->reg_clr);
> +
> + irq_chip_ack_parent(d);
> +}
> +
> +static void sg2042_msi_irq_compose_msi_msg(struct irq_data *data,
> + struct msi_msg *msg)
> +{
> + struct sg2042_msi_data *priv = irq_data_get_irq_chip_data(data);
> +
> + msg->address_hi = upper_32_bits(priv->doorbell_addr);
> + msg->address_lo = lower_32_bits(priv->doorbell_addr);
> + msg->data = 1 << (data->hwirq - priv->irq_first);
> +
> + pr_debug("%s hwirq[%ld]: address_hi[%#x], address_lo[%#x], data[%#x]\n",
> + __func__, data->hwirq, msg->address_hi, msg->address_lo, msg->data);
Don't print addresses; it is useless - they will be constants.
> +}
> +
> +static struct irq_chip sg2042_msi_middle_irq_chip = {
> + .name = "SG2042 MSI",
> + .irq_ack = sg2042_msi_irq_ack,
> + .irq_mask = irq_chip_mask_parent,
> + .irq_unmask = irq_chip_unmask_parent,
> +#ifdef CONFIG_SMP
> + .irq_set_affinity = irq_chip_set_affinity_parent,
> +#endif
> + .irq_compose_msi_msg = sg2042_msi_irq_compose_msi_msg,
> +};
...
> +static int sg2042_msi_probe(struct platform_device *pdev)
> +{
> + struct of_phandle_args args = {};
> + struct sg2042_msi_data *data;
> + int ret;
> +
> + data = devm_kzalloc(&pdev->dev, sizeof(struct sg2042_msi_data), GFP_KERNEL);
> + if (!data)
> + return -ENOMEM;
> +
> + data->reg_clr = devm_platform_ioremap_resource_byname(pdev, "clr");
> + if (IS_ERR(data->reg_clr)) {
> + dev_err(&pdev->dev, "Failed to map clear register\n");
> + return PTR_ERR(data->reg_clr);
> + }
> +
> + if (of_property_read_u64(pdev->dev.of_node, "sophgo,msi-doorbell-addr",
> + &data->doorbell_addr)) {
> + dev_err(&pdev->dev, "Unable to parse MSI doorbell addr\n");
> + return -EINVAL;
> + }
> +
> + ret = of_parse_phandle_with_args(pdev->dev.of_node, "msi-ranges",
> + "#interrupt-cells", 0, &args);
> + if (ret) {
> + dev_err(&pdev->dev, "Unable to parse MSI vec base\n");
> + return ret;
> + }
You leak the phandle (of_parse_phandle_with_args() takes a reference on
args.np, so you need a matching of_node_put()). You leak much more, btw...
> + data->irq_first = (u32)args.args[0];
> +
> + ret = of_property_read_u32_index(pdev->dev.of_node, "msi-ranges",
> + args.args_count + 1, &data->num_irqs);
> + if (ret) {
> + dev_err(&pdev->dev, "Unable to parse MSI vec number\n");
> + return ret;
> + }
> +
> + if (data->irq_first < SG2042_VECTOR_MIN ||
> + (data->irq_first + data->num_irqs - 1) > SG2042_VECTOR_MAX) {
> + dev_err(&pdev->dev, "msi-ranges is incorrect!\n");
> + return -EINVAL;
> + }
> +
> + mutex_init(&data->msi_map_lock);
> +
> + data->msi_map = bitmap_zalloc(data->num_irqs, GFP_KERNEL);
This also leaks during removal.
> + if (!data->msi_map)
> + return -ENOMEM;
> +
> + ret = sg2042_msi_init_domains(data, pdev->dev.of_node);
> + if (ret)
> + bitmap_free(data->msi_map);
> +
> + return ret;
> +}
> +
> +static const struct of_device_id sg2042_msi_of_match[] = {
> + { .compatible = "sophgo,sg2042-msi" },
> + {}
> +};
> +
> +static struct platform_driver sg2042_msi_driver = {
> + .driver = {
> + .name = "sg2042-msi",
> + .of_match_table = of_match_ptr(sg2042_msi_of_match),
Drop of_match_ptr(); it is unnecessary and might lead to compiler warnings
(unused sg2042_msi_of_match) even if this driver is not built as a module.
Best regards,
Krzysztof
Powered by blists - more mailing lists