lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID:
 <SA1PR12MB8120E42BCD62469B275102BF95DBA@SA1PR12MB8120.namprd12.prod.outlook.com>
Date: Mon, 1 Dec 2025 09:58:06 +0000
From: "Verma, Devendra" <Devendra.Verma@....com>
To: "Verma, Devendra" <Devendra.Verma@....com>, "bhelgaas@...gle.com"
	<bhelgaas@...gle.com>, "mani@...nel.org" <mani@...nel.org>,
	"vkoul@...nel.org" <vkoul@...nel.org>
CC: "dmaengine@...r.kernel.org" <dmaengine@...r.kernel.org>,
	"linux-pci@...r.kernel.org" <linux-pci@...r.kernel.org>,
	"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>, "Simek,
 Michal" <michal.simek@....com>
Subject: RE: [PATCH RESEND v6 1/2] dmaengine: dw-edma: Add AMD MDB Endpoint
 Support

[AMD Official Use Only - AMD Internal Distribution Only]

Hi All

Could you all please review the following patch?

Regards,
Dev

> -----Original Message-----
> From: Devendra K Verma <devendra.verma@....com>
> Sent: Friday, November 21, 2025 5:05 PM
> To: bhelgaas@...gle.com; mani@...nel.org; vkoul@...nel.org
> Cc: dmaengine@...r.kernel.org; linux-pci@...r.kernel.org; linux-
> kernel@...r.kernel.org; Simek, Michal <michal.simek@....com>; Verma,
> Devendra <Devendra.Verma@....com>
> Subject: [PATCH RESEND v6 1/2] dmaengine: dw-edma: Add AMD MDB
> Endpoint Support
>
> AMD MDB PCIe endpoint support. For AMD specific support added the
> following
>   - AMD supported PCIe Device IDs and Vendor ID (Xilinx).
>   - AMD MDB specific driver data
>   - AMD MDB specific VSEC capability to retrieve the device DDR
>     base address.
>
> Signed-off-by: Devendra K Verma <devendra.verma@....com>
> ---
> Changes in v6:
> Included "sizes.h" header and used the appropriate definitions instead of
> constants.
>
> Changes in v5:
> Added the definitions for Xilinx specific VSEC header id, revision, and register
> offsets.
> Corrected the error type when no physical offset found for device side
> memory.
> Corrected the order of variables.
>
> Changes in v4:
> Configured 8 read and 8 write channels for Xilinx vendor. Added checks to
> validate vendor ID for vendor specific vsec id.
> Added Xilinx specific vendor id for vsec specific to Xilinx. Added the LL and
> data region offsets, size as input params to function
> dw_edma_set_chan_region_offset().
> Moved the LL and data region offsets assignment to function for Xilinx specific
> case.
> Corrected comments.
>
> Changes in v3:
> Corrected a typo when assigning AMD (Xilinx) vsec id macro and condition
> check.
>
> Changes in v2:
> Reverted the devmem_phys_off type to u64.
> Renamed the function appropriately to suit the functionality for setting the LL
> & data region offsets.
>
> Changes in v1:
> Removed the pci device id from pci_ids.h file.
> Added the vendor id macro as per the suggested method.
> Changed the type of the newly added devmem_phys_off variable.
> Added logic to assign offsets for LL and data region blocks in case more
> channels are enabled than given in amd_mdb_data struct.
> ---
>  drivers/dma/dw-edma/dw-edma-pcie.c | 139
> ++++++++++++++++++++++++++++++++++++-
>  1 file changed, 137 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-
> edma/dw-edma-pcie.c
> index 3371e0a7..3d7247c 100644
> --- a/drivers/dma/dw-edma/dw-edma-pcie.c
> +++ b/drivers/dma/dw-edma/dw-edma-pcie.c
> @@ -14,15 +14,31 @@
>  #include <linux/pci-epf.h>
>  #include <linux/msi.h>
>  #include <linux/bitfield.h>
> +#include <linux/sizes.h>
>
>  #include "dw-edma-core.h"
>
> +/* Synopsys */
>  #define DW_PCIE_VSEC_DMA_ID                  0x6
>  #define DW_PCIE_VSEC_DMA_BAR                 GENMASK(10, 8)
>  #define DW_PCIE_VSEC_DMA_MAP                 GENMASK(2, 0)
>  #define DW_PCIE_VSEC_DMA_WR_CH                       GENMASK(9, 0)
>  #define DW_PCIE_VSEC_DMA_RD_CH                       GENMASK(25, 16)
>
> +/* AMD MDB (Xilinx) specific defines */
> +#define DW_PCIE_XILINX_MDB_VSEC_DMA_ID               0x6
> +#define DW_PCIE_XILINX_MDB_VSEC_ID           0x20
> +#define PCI_DEVICE_ID_AMD_MDB_B054           0xb054
> +#define DW_PCIE_AMD_MDB_INVALID_ADDR         (~0ULL)
> +#define DW_PCIE_XILINX_LL_OFF_GAP            0x200000
> +#define DW_PCIE_XILINX_LL_SIZE                       0x800
> +#define DW_PCIE_XILINX_DT_OFF_GAP            0x100000
> +#define DW_PCIE_XILINX_DT_SIZE                       0x800
> +#define DW_PCIE_XILINX_MDB_VSEC_HDR_ID               0x20
> +#define DW_PCIE_XILINX_MDB_VSEC_REV          0x1
> +#define DW_PCIE_XILINX_MDB_DEVMEM_OFF_REG_HIGH       0xc
> +#define DW_PCIE_XILINX_MDB_DEVMEM_OFF_REG_LOW        0x8
> +
>  #define DW_BLOCK(a, b, c) \
>       { \
>               .bar = a, \
> @@ -50,6 +66,7 @@ struct dw_edma_pcie_data {
>       u8                              irqs;
>       u16                             wr_ch_cnt;
>       u16                             rd_ch_cnt;
> +     u64                             devmem_phys_off;
>  };
>
>  static const struct dw_edma_pcie_data snps_edda_data = { @@ -90,6
> +107,64 @@ struct dw_edma_pcie_data {
>       .rd_ch_cnt                      = 2,
>  };
>
> +static const struct dw_edma_pcie_data amd_mdb_data = {
> +     /* MDB registers location */
> +     .rg.bar                         = BAR_0,
> +     .rg.off                         = SZ_4K,        /*  4 Kbytes */
> +     .rg.sz                          = SZ_8K,        /*  8 Kbytes */
> +
> +     /* Other */
> +     .mf                             = EDMA_MF_HDMA_NATIVE,
> +     .irqs                           = 1,
> +     .wr_ch_cnt                      = 8,
> +     .rd_ch_cnt                      = 8,
> +};
> +
> +static void dw_edma_set_chan_region_offset(struct dw_edma_pcie_data
> *pdata,
> +                                        enum pci_barno bar, off_t start_off,
> +                                        off_t ll_off_gap, size_t ll_size,
> +                                        off_t dt_off_gap, size_t dt_size) {
> +     u16 wr_ch = pdata->wr_ch_cnt;
> +     u16 rd_ch = pdata->rd_ch_cnt;
> +     off_t off;
> +     u16 i;
> +
> +     off = start_off;
> +
> +     /* Write channel LL region */
> +     for (i = 0; i < wr_ch; i++) {
> +             pdata->ll_wr[i].bar = bar;
> +             pdata->ll_wr[i].off = off;
> +             pdata->ll_wr[i].sz = ll_size;
> +             off += ll_off_gap;
> +     }
> +
> +     /* Read channel LL region */
> +     for (i = 0; i < rd_ch; i++) {
> +             pdata->ll_rd[i].bar = bar;
> +             pdata->ll_rd[i].off = off;
> +             pdata->ll_rd[i].sz = ll_size;
> +             off += ll_off_gap;
> +     }
> +
> +     /* Write channel data region */
> +     for (i = 0; i < wr_ch; i++) {
> +             pdata->dt_wr[i].bar = bar;
> +             pdata->dt_wr[i].off = off;
> +             pdata->dt_wr[i].sz = dt_size;
> +             off += dt_off_gap;
> +     }
> +
> +     /* Read channel data region */
> +     for (i = 0; i < rd_ch; i++) {
> +             pdata->dt_rd[i].bar = bar;
> +             pdata->dt_rd[i].off = off;
> +             pdata->dt_rd[i].sz = dt_size;
> +             off += dt_off_gap;
> +     }
> +}
> +
>  static int dw_edma_pcie_irq_vector(struct device *dev, unsigned int nr)  {
>       return pci_irq_vector(to_pci_dev(dev), nr); @@ -120,9 +195,24 @@
> static void dw_edma_pcie_get_vsec_dma_data(struct pci_dev *pdev,
>       u32 val, map;
>       u16 vsec;
>       u64 off;
> +     int cap;
> +
> +     /*
> +      * Synopsys and AMD (Xilinx) use the same VSEC ID for the purpose
> +      * of map, channel counts, etc.
> +      */
> +     switch (pdev->vendor) {
> +     case PCI_VENDOR_ID_SYNOPSYS:
> +             cap = DW_PCIE_VSEC_DMA_ID;
> +             break;
> +     case PCI_VENDOR_ID_XILINX:
> +             cap = DW_PCIE_XILINX_MDB_VSEC_DMA_ID;
> +             break;
> +     default:
> +             return;
> +     }
>
> -     vsec = pci_find_vsec_capability(pdev, PCI_VENDOR_ID_SYNOPSYS,
> -                                     DW_PCIE_VSEC_DMA_ID);
> +     vsec = pci_find_vsec_capability(pdev, pdev->vendor, cap);
>       if (!vsec)
>               return;
>
> @@ -155,6 +245,28 @@ static void
> dw_edma_pcie_get_vsec_dma_data(struct pci_dev *pdev,
>       off <<= 32;
>       off |= val;
>       pdata->rg.off = off;
> +
> +     /* Xilinx specific VSEC capability */
> +     vsec = pci_find_vsec_capability(pdev, PCI_VENDOR_ID_XILINX,
> +                                     DW_PCIE_XILINX_MDB_VSEC_ID);
> +     if (!vsec)
> +             return;
> +
> +     pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &val);
> +     if (PCI_VNDR_HEADER_ID(val) !=
> DW_PCIE_XILINX_MDB_VSEC_HDR_ID ||
> +         PCI_VNDR_HEADER_REV(val) != DW_PCIE_XILINX_MDB_VSEC_REV)
> +             return;
> +
> +     pci_read_config_dword(pdev,
> +                           vsec +
> DW_PCIE_XILINX_MDB_DEVMEM_OFF_REG_HIGH,
> +                           &val);
> +     off = val;
> +     pci_read_config_dword(pdev,
> +                           vsec +
> DW_PCIE_XILINX_MDB_DEVMEM_OFF_REG_LOW,
> +                           &val);
> +     off <<= 32;
> +     off |= val;
> +     pdata->devmem_phys_off = off;
>  }
>
>  static int dw_edma_pcie_probe(struct pci_dev *pdev, @@ -179,6 +291,7 @@
> static int dw_edma_pcie_probe(struct pci_dev *pdev,
>       }
>
>       memcpy(vsec_data, pdata, sizeof(struct dw_edma_pcie_data));
> +     vsec_data->devmem_phys_off =
> DW_PCIE_AMD_MDB_INVALID_ADDR;
>
>       /*
>        * Tries to find if exists a PCIe Vendor-Specific Extended Capability
> @@ -186,6 +299,26 @@ static int dw_edma_pcie_probe(struct pci_dev
> *pdev,
>        */
>       dw_edma_pcie_get_vsec_dma_data(pdev, vsec_data);
>
> +     if (pdev->vendor == PCI_VENDOR_ID_XILINX) {
> +             /*
> +              * There is no valid address found for the LL memory
> +              * space on the device side.
> +              */
> +             if (vsec_data->devmem_phys_off ==
> DW_PCIE_AMD_MDB_INVALID_ADDR)
> +                     return -ENOMEM;
> +
> +             /*
> +              * Configure the channel LL and data blocks if number of
> +              * channels enabled in VSEC capability are more than the
> +              * channels configured in amd_mdb_data.
> +              */
> +             dw_edma_set_chan_region_offset(vsec_data, BAR_2, 0,
> +                                            DW_PCIE_XILINX_LL_OFF_GAP,
> +                                            DW_PCIE_XILINX_LL_SIZE,
> +                                            DW_PCIE_XILINX_DT_OFF_GAP,
> +                                            DW_PCIE_XILINX_DT_SIZE);
> +     }
> +
>       /* Mapping PCI BAR regions */
>       mask = BIT(vsec_data->rg.bar);
>       for (i = 0; i < vsec_data->wr_ch_cnt; i++) { @@ -367,6 +500,8 @@
> static void dw_edma_pcie_remove(struct pci_dev *pdev)
>
>  static const struct pci_device_id dw_edma_pcie_id_table[] = {
>       { PCI_DEVICE_DATA(SYNOPSYS, EDDA, &snps_edda_data) },
> +     { PCI_VDEVICE(XILINX, PCI_DEVICE_ID_AMD_MDB_B054),
> +       (kernel_ulong_t)&amd_mdb_data },
>       { }
>  };
>  MODULE_DEVICE_TABLE(pci, dw_edma_pcie_id_table);
> --
> 1.8.3.1


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ