[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20220110005612-mutt-send-email-mst@kernel.org>
Date: Mon, 10 Jan 2022 01:04:32 -0500
From: "Michael S. Tsirkin" <mst@...hat.com>
To: Zhu Lingshan <lingshan.zhu@...el.com>
Cc: jasowang@...hat.com, netdev@...r.kernel.org
Subject: Re: [PATCH 7/7] vDPA/ifcvf: improve irq requester, to handle
per_vq/shared/config irq
On Mon, Jan 10, 2022 at 01:18:51PM +0800, Zhu Lingshan wrote:
> This commit extends the irq requester's abilities to handle per-vq irq,
> shared irq and config irq.
>
> On some platforms, the device can not get enough vectors for every
> virtqueue and config interrupt, the device needs to work under such
> circumstances.
>
> Normally a device can get enough vectors, so every virtqueue and
> config interrupt can have its own vector/irq. If the total vector
> number is less than all virtqueues + 1(config interrupt), all
> virtqueues need to share a vector/irq and config interrupt is
> enabled. If the total vector number < 2, all vitequeues share
> a vector/irq, and config interrupt is disabled. Otherwise it will
> fail if allocation for vectors fails.
>
> This commit also made necessary changes to the irq cleaner to
> free per-vq irq/shared irq and config irq.
>
> Signed-off-by: Zhu Lingshan <lingshan.zhu@...el.com>
In this case, shouldn't you also check VIRTIO_PCI_ISR_CONFIG?
doing that will skip the need
> ---
> drivers/vdpa/ifcvf/ifcvf_base.h | 6 +--
> drivers/vdpa/ifcvf/ifcvf_main.c | 78 +++++++++++++++------------------
> 2 files changed, 38 insertions(+), 46 deletions(-)
>
> diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
> index 1d5431040d7d..1d0afb63f06c 100644
> --- a/drivers/vdpa/ifcvf/ifcvf_base.h
> +++ b/drivers/vdpa/ifcvf/ifcvf_base.h
> @@ -27,8 +27,6 @@
>
> #define IFCVF_QUEUE_ALIGNMENT PAGE_SIZE
> #define IFCVF_QUEUE_MAX 32768
> -#define IFCVF_MSI_CONFIG_OFF 0
> -#define IFCVF_MSI_QUEUE_OFF 1
> #define IFCVF_PCI_MAX_RESOURCE 6
>
> #define IFCVF_LM_CFG_SIZE 0x40
> @@ -102,11 +100,13 @@ struct ifcvf_hw {
> u8 notify_bar;
> /* Notificaiton bar address */
> void __iomem *notify_base;
> + u8 vector_per_vq;
> + u16 padding;
What is this padding doing?
> phys_addr_t notify_base_pa;
> u32 notify_off_multiplier;
> + u32 dev_type;
> u64 req_features;
> u64 hw_features;
> - u32 dev_type;
moving things around ... optimization? split out.
> struct virtio_pci_common_cfg __iomem *common_cfg;
> void __iomem *net_cfg;
> struct vring_info vring[IFCVF_MAX_QUEUES];
> diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
> index 414b5dfd04ca..ec76e342bd7e 100644
> --- a/drivers/vdpa/ifcvf/ifcvf_main.c
> +++ b/drivers/vdpa/ifcvf/ifcvf_main.c
> @@ -17,6 +17,8 @@
> #define DRIVER_AUTHOR "Intel Corporation"
> #define IFCVF_DRIVER_NAME "ifcvf"
>
> +static struct vdpa_config_ops ifc_vdpa_ops;
> +
there can be multiple devices thinkably.
reusing a global ops does not sound reasonable.
> static irqreturn_t ifcvf_config_changed(int irq, void *arg)
> {
> struct ifcvf_hw *vf = arg;
> @@ -63,13 +65,20 @@ static void ifcvf_free_irq(struct ifcvf_adapter *adapter, int queues)
> struct ifcvf_hw *vf = &adapter->vf;
> int i;
>
> + if (vf->vector_per_vq)
> + for (i = 0; i < queues; i++) {
> + devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]);
> + vf->vring[i].irq = -EINVAL;
> + }
> + else
> + devm_free_irq(&pdev->dev, vf->vring[0].irq, vf);
>
> - for (i = 0; i < queues; i++) {
> - devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]);
> - vf->vring[i].irq = -EINVAL;
> +
> + if (vf->config_irq != -EINVAL) {
> + devm_free_irq(&pdev->dev, vf->config_irq, vf);
> + vf->config_irq = -EINVAL;
> }
what about other error types?
>
> - devm_free_irq(&pdev->dev, vf->config_irq, vf);
> ifcvf_free_irq_vectors(pdev);
> }
>
> @@ -191,52 +200,35 @@ static int ifcvf_request_config_irq(struct ifcvf_adapter *adapter, int config_ve
>
> static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
> {
> - struct pci_dev *pdev = adapter->pdev;
> struct ifcvf_hw *vf = &adapter->vf;
> - int vector, i, ret, irq;
> - u16 max_intr;
> + u16 nvectors, max_vectors;
> + int config_vector, ret;
>
> - /* all queues and config interrupt */
> - max_intr = vf->nr_vring + 1;
> + nvectors = ifcvf_alloc_vectors(adapter);
> + if (nvectors < 0)
> + return nvectors;
>
> - ret = pci_alloc_irq_vectors(pdev, max_intr,
> - max_intr, PCI_IRQ_MSIX);
> - if (ret < 0) {
> - IFCVF_ERR(pdev, "Failed to alloc IRQ vectors\n");
> - return ret;
> - }
> + vf->vector_per_vq = true;
> + max_vectors = vf->nr_vring + 1;
> + config_vector = vf->nr_vring;
>
> - snprintf(vf->config_msix_name, 256, "ifcvf[%s]-config\n",
> - pci_name(pdev));
> - vector = 0;
> - vf->config_irq = pci_irq_vector(pdev, vector);
> - ret = devm_request_irq(&pdev->dev, vf->config_irq,
> - ifcvf_config_changed, 0,
> - vf->config_msix_name, vf);
> - if (ret) {
> - IFCVF_ERR(pdev, "Failed to request config irq\n");
> - return ret;
> + if (nvectors < max_vectors) {
> + vf->vector_per_vq = false;
> + config_vector = 1;
> + ifc_vdpa_ops.get_vq_irq = NULL;
> }
>
> - for (i = 0; i < vf->nr_vring; i++) {
> - snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d\n",
> - pci_name(pdev), i);
> - vector = i + IFCVF_MSI_QUEUE_OFF;
> - irq = pci_irq_vector(pdev, vector);
> - ret = devm_request_irq(&pdev->dev, irq,
> - ifcvf_intr_handler, 0,
> - vf->vring[i].msix_name,
> - &vf->vring[i]);
> - if (ret) {
> - IFCVF_ERR(pdev,
> - "Failed to request irq for vq %d\n", i);
> - ifcvf_free_irq(adapter, i);
> + if (nvectors < 2)
> + config_vector = 0;
>
> - return ret;
> - }
> + ret = ifcvf_request_vq_irq(adapter, vf->vector_per_vq);
> + if (ret)
> + return ret;
>
> - vf->vring[i].irq = irq;
> - }
> + ret = ifcvf_request_config_irq(adapter, config_vector);
> +
> + if (ret)
> + return ret;
here on error we need to cleanup vq irq we requested, need we not?
>
> return 0;
> }
> @@ -573,7 +565,7 @@ static struct vdpa_notification_area ifcvf_get_vq_notification(struct vdpa_devic
> * IFCVF currently does't have on-chip IOMMU, so not
> * implemented set_map()/dma_map()/dma_unmap()
> */
> -static const struct vdpa_config_ops ifc_vdpa_ops = {
> +static struct vdpa_config_ops ifc_vdpa_ops = {
> .get_features = ifcvf_vdpa_get_features,
> .set_features = ifcvf_vdpa_set_features,
> .get_status = ifcvf_vdpa_get_status,
> --
> 2.27.0
Powered by blists - more mailing lists