Message-ID: <87sgazgl0b.fsf@nanos.tec.linutronix.de>
Date: Wed, 30 Sep 2020 20:47:00 +0200
From: Thomas Gleixner <tglx@...utronix.de>
To: Dave Jiang <dave.jiang@...el.com>, vkoul@...nel.org,
megha.dey@...el.com, maz@...nel.org, bhelgaas@...gle.com,
alex.williamson@...hat.com, jacob.jun.pan@...el.com,
ashok.raj@...el.com, jgg@...lanox.com, yi.l.liu@...el.com,
baolu.lu@...el.com, kevin.tian@...el.com, sanjay.k.kumar@...el.com,
tony.luck@...el.com, jing.lin@...el.com, dan.j.williams@...el.com,
kwankhede@...dia.com, eric.auger@...hat.com, parav@...lanox.com,
jgg@...lanox.com, rafael@...nel.org, netanelg@...lanox.com,
shahafs@...lanox.com, yan.y.zhao@...ux.intel.com,
pbonzini@...hat.com, samuel.ortiz@...el.com, mona.hossain@...el.com
Cc: dmaengine@...r.kernel.org, linux-kernel@...r.kernel.org,
x86@...nel.org, linux-pci@...r.kernel.org, kvm@...r.kernel.org
Subject: Re: [PATCH v3 05/18] dmaengine: idxd: add IMS support in base driver
On Tue, Sep 15 2020 at 16:28, Dave Jiang wrote:
> struct idxd_device {
> @@ -170,6 +171,7 @@ struct idxd_device {
>
> int num_groups;
>
> + u32 ims_offset;
> u32 msix_perm_offset;
> u32 wqcfg_offset;
> u32 grpcfg_offset;
> @@ -177,6 +179,7 @@ struct idxd_device {
>
> u64 max_xfer_bytes;
> u32 max_batch_size;
> + int ims_size;
> int max_groups;
> int max_engines;
> int max_tokens;
> @@ -196,6 +199,7 @@ struct idxd_device {
> struct work_struct work;
>
> int *int_handles;
> + struct sbitmap ims_sbmap;

This bitmap is needed for what?

> --- a/drivers/dma/idxd/init.c
> +++ b/drivers/dma/idxd/init.c
> @@ -231,10 +231,51 @@ static void idxd_read_table_offsets(struct idxd_device *idxd)
> idxd->msix_perm_offset = offsets.msix_perm * 0x100;
> dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n",
> idxd->msix_perm_offset);
> + idxd->ims_offset = offsets.ims * 0x100;

Magic constant pulled out of thin air. #define ....
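
Something along these lines perhaps (IDXD_TABLE_MULT is a made up name,
completely untested):

	/* Table offsets in the offsets register are in units of 0x100 bytes */
	#define IDXD_TABLE_MULT		0x100

	idxd->msix_perm_offset = offsets.msix_perm * IDXD_TABLE_MULT;
	idxd->ims_offset = offsets.ims * IDXD_TABLE_MULT;
	idxd->perfmon_offset = offsets.perfmon * IDXD_TABLE_MULT;
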
> + dev_dbg(dev, "IDXD IMS Offset: %#x\n", idxd->ims_offset);
> idxd->perfmon_offset = offsets.perfmon * 0x100;
> dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
> }
>
> +#define PCI_DEVSEC_CAP 0x23
> +#define SIOVDVSEC1(offset) ((offset) + 0x4)
> +#define SIOVDVSEC2(offset) ((offset) + 0x8)
> +#define DVSECID 0x5
> +#define SIOVCAP(offset) ((offset) + 0x14)
> +
> +static void idxd_check_siov(struct idxd_device *idxd)
> +{
> + struct pci_dev *pdev = idxd->pdev;
> + struct device *dev = &pdev->dev;
> + int dvsec;
> + u16 val16;
> + u32 val32;
> +
> + dvsec = pci_find_ext_capability(pdev, PCI_DEVSEC_CAP);
> + pci_read_config_word(pdev, SIOVDVSEC1(dvsec), &val16);
> + if (val16 != PCI_VENDOR_ID_INTEL) {
> + dev_dbg(&pdev->dev, "DVSEC vendor id is not Intel\n");
> + return;
> + }
> +
> + pci_read_config_word(pdev, SIOVDVSEC2(dvsec), &val16);
> + if (val16 != DVSECID) {
> + dev_dbg(&pdev->dev, "DVSEC ID is not SIOV\n");
> + return;
> + }
> +
> + pci_read_config_dword(pdev, SIOVCAP(dvsec), &val32);
> + if ((val32 & 0x1) && idxd->hw.gen_cap.max_ims_mult) {
> + idxd->ims_size = idxd->hw.gen_cap.max_ims_mult * 256ULL;
> + dev_dbg(dev, "IMS size: %u\n", idxd->ims_size);
> + set_bit(IDXD_FLAG_SIOV_SUPPORTED, &idxd->flags);
> + dev_dbg(&pdev->dev, "IMS supported for device\n");
> + return;
> + }
> +
> + dev_dbg(&pdev->dev, "SIOV unsupported for device\n");

It's really hard to find the actual code inside all of this dev_dbg()
noise.

But why is this capability check done in this driver in the first place?
Is this capability stuff really IDXD-specific, or is the next device
which supports this going to copy and paste the above?
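
If it is generic, then it rather wants to be a helper in the PCI core
which every SIOV capable device can use instead of open coding the DVSEC
walk. Rough sketch, function name made up and completely untested:

	u16 pci_find_dvsec(struct pci_dev *pdev, u16 vendor, u16 id)
	{
		u16 pos = 0;
		u16 vid, did;

		while ((pos = pci_find_next_ext_capability(pdev, pos,
							   PCI_EXT_CAP_ID_DVSEC))) {
			pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER1, &vid);
			pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER2, &did);
			if (vid == vendor && did == id)
				return pos;
		}
		return 0;
	}

and the driver side then boils down to:

	dvsec = pci_find_dvsec(pdev, PCI_VENDOR_ID_INTEL, DVSECID);
	if (!dvsec)
		return;
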
> static void idxd_read_caps(struct idxd_device *idxd)
> {
> struct device *dev = &idxd->pdev->dev;
> @@ -253,6 +294,7 @@ static void idxd_read_caps(struct idxd_device *idxd)
> dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
> idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift;
> dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
> + idxd_check_siov(idxd);
> if (idxd->hw.gen_cap.config_en)
> set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);
>
> @@ -347,9 +389,19 @@ static int idxd_probe(struct idxd_device *idxd)
>
> idxd->major = idxd_cdev_get_major(idxd);
>
> + if (idxd->ims_size) {
> + rc = sbitmap_init_node(&idxd->ims_sbmap, idxd->ims_size, -1,
> + GFP_KERNEL, dev_to_node(dev));
> + if (rc < 0)
> + goto sbitmap_fail;
> + }

Ah, here the bitmap is allocated, but it's still completely unclear what
it is used for.

The subject line is misleading as hell. This does not add support; it
does some magic capability checks and allocates stuff whose purpose
nobody knows.

Thanks,
tglx