Message-ID: <a550db5b-a36d-982c-0783-134abdeb1f70@linux.intel.com>
Date: Tue, 28 Oct 2025 15:28:09 +0200 (EET)
From: Ilpo Järvinen <ilpo.jarvinen@...ux.intel.com>
To: Devendra K Verma <devendra.verma@....com>
cc: bhelgaas@...gle.com, mani@...nel.org, vkoul@...nel.org,
dmaengine@...r.kernel.org, linux-pci@...r.kernel.org,
LKML <linux-kernel@...r.kernel.org>, michal.simek@....com
Subject: Re: [PATCH v5 1/2] dmaengine: dw-edma: Add AMD MDB Endpoint
Support
On Tue, 28 Oct 2025, Devendra K Verma wrote:
> Add AMD MDB PCIe endpoint support. For AMD-specific support,
> the following are added:
> - AMD supported PCIe Device IDs and Vendor ID (Xilinx).
> - AMD MDB specific driver data
> - AMD MDB specific VSEC capability to retrieve the device DDR
> base address.
>
> Signed-off-by: Devendra K Verma <devendra.verma@....com>
> ---
> Changes in v5:
> Added the definitions for the Xilinx-specific VSEC header ID,
> revision, and register offsets.
> Corrected the error type when no physical offset is found for
> device-side memory.
> Corrected the order of variables.
>
> Changes in v4:
> Configured 8 read and 8 write channels for the Xilinx vendor.
> Added checks to validate the vendor ID for the vendor-specific
> VSEC ID.
> Added the Xilinx-specific vendor ID for the VSEC specific to Xilinx.
> Added the LL and data region offsets, size as input params to
> function dw_edma_set_chan_region_offset().
> Moved the LL and data region offsets assignment to function
> for Xilinx specific case.
> Corrected comments.
>
> Changes in v3:
> Corrected a typo when assigning AMD (Xilinx) vsec id macro
> and condition check.
>
> Changes in v2:
> Reverted the devmem_phys_off type to u64.
> Renamed the function to reflect that it sets the
> LL & data region offsets.
>
> Changes in v1:
> Removed the pci device id from pci_ids.h file.
> Added the vendor id macro as per the suggested method.
> Changed the type of the newly added devmem_phys_off variable.
> Added logic to assign offsets for LL and data region blocks
> in case more channels are enabled than given in the
> amd_mdb_data struct.
> ---
> drivers/dma/dw-edma/dw-edma-pcie.c | 138 ++++++++++++++++++++++++++++++++++++-
> 1 file changed, 136 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c
> index 3371e0a7..7b991a0 100644
> --- a/drivers/dma/dw-edma/dw-edma-pcie.c
> +++ b/drivers/dma/dw-edma/dw-edma-pcie.c
> @@ -17,12 +17,27 @@
>
> #include "dw-edma-core.h"
>
> +/* Synopsys */
> #define DW_PCIE_VSEC_DMA_ID 0x6
> #define DW_PCIE_VSEC_DMA_BAR GENMASK(10, 8)
> #define DW_PCIE_VSEC_DMA_MAP GENMASK(2, 0)
> #define DW_PCIE_VSEC_DMA_WR_CH GENMASK(9, 0)
> #define DW_PCIE_VSEC_DMA_RD_CH GENMASK(25, 16)
>
> +/* AMD MDB (Xilinx) specific defines */
> +#define DW_PCIE_XILINX_MDB_VSEC_DMA_ID 0x6
> +#define DW_PCIE_XILINX_MDB_VSEC_ID 0x20
> +#define PCI_DEVICE_ID_AMD_MDB_B054 0xb054
> +#define DW_PCIE_AMD_MDB_INVALID_ADDR (~0ULL)
> +#define DW_PCIE_XILINX_LL_OFF_GAP 0x200000
> +#define DW_PCIE_XILINX_LL_SIZE 0x800
> +#define DW_PCIE_XILINX_DT_OFF_GAP 0x100000
> +#define DW_PCIE_XILINX_DT_SIZE 0x800
> +#define DW_PCIE_XILINX_MDB_VSEC_HDR_ID 0x20
> +#define DW_PCIE_XILINX_MDB_VSEC_REV 0x1
> +#define DW_PCIE_XILINX_MDB_DEVMEM_OFF_REG_HIGH 0xc
> +#define DW_PCIE_XILINX_MDB_DEVMEM_OFF_REG_LOW 0x8
> +
> #define DW_BLOCK(a, b, c) \
> { \
> .bar = a, \
> @@ -50,6 +65,7 @@ struct dw_edma_pcie_data {
> u8 irqs;
> u16 wr_ch_cnt;
> u16 rd_ch_cnt;
> + u64 devmem_phys_off;
> };
>
> static const struct dw_edma_pcie_data snps_edda_data = {
> @@ -90,6 +106,64 @@ struct dw_edma_pcie_data {
> .rd_ch_cnt = 2,
> };
>
> +static const struct dw_edma_pcie_data amd_mdb_data = {
> + /* MDB registers location */
> + .rg.bar = BAR_0,
> + .rg.off = 0x00001000, /* 4 Kbytes */
> + .rg.sz = 0x00002000, /* 8 Kbytes */
Please use SZ_* and check that this file #includes the correct header for
them. You can then drop those comments.
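
Something along these lines (a quick sketch, untested; assumes SZ_4K and
SZ_8K from <linux/sizes.h>):

#include <linux/sizes.h>

	/* MDB registers location */
	.rg.bar = BAR_0,
	.rg.off = SZ_4K,
	.rg.sz = SZ_8K,

That way the sizes are obvious without the byte-count comments.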
--
i.
> +
> + /* Other */
> + .mf = EDMA_MF_HDMA_NATIVE,
> + .irqs = 1,
> + .wr_ch_cnt = 8,
> + .rd_ch_cnt = 8,
> +};
> +
> +static void dw_edma_set_chan_region_offset(struct dw_edma_pcie_data *pdata,
> + enum pci_barno bar, off_t start_off,
> + off_t ll_off_gap, size_t ll_size,
> + off_t dt_off_gap, size_t dt_size)
> +{
> + u16 wr_ch = pdata->wr_ch_cnt;
> + u16 rd_ch = pdata->rd_ch_cnt;
> + off_t off;
> + u16 i;
> +
> + off = start_off;
> +
> + /* Write channel LL region */
> + for (i = 0; i < wr_ch; i++) {
> + pdata->ll_wr[i].bar = bar;
> + pdata->ll_wr[i].off = off;
> + pdata->ll_wr[i].sz = ll_size;
> + off += ll_off_gap;
> + }
> +
> + /* Read channel LL region */
> + for (i = 0; i < rd_ch; i++) {
> + pdata->ll_rd[i].bar = bar;
> + pdata->ll_rd[i].off = off;
> + pdata->ll_rd[i].sz = ll_size;
> + off += ll_off_gap;
> + }
> +
> + /* Write channel data region */
> + for (i = 0; i < wr_ch; i++) {
> + pdata->dt_wr[i].bar = bar;
> + pdata->dt_wr[i].off = off;
> + pdata->dt_wr[i].sz = dt_size;
> + off += dt_off_gap;
> + }
> +
> + /* Read channel data region */
> + for (i = 0; i < rd_ch; i++) {
> + pdata->dt_rd[i].bar = bar;
> + pdata->dt_rd[i].off = off;
> + pdata->dt_rd[i].sz = dt_size;
> + off += dt_off_gap;
> + }
> +}
> +
> static int dw_edma_pcie_irq_vector(struct device *dev, unsigned int nr)
> {
> return pci_irq_vector(to_pci_dev(dev), nr);
> @@ -120,9 +194,24 @@ static void dw_edma_pcie_get_vsec_dma_data(struct pci_dev *pdev,
> u32 val, map;
> u16 vsec;
> u64 off;
> + int cap;
>
> - vsec = pci_find_vsec_capability(pdev, PCI_VENDOR_ID_SYNOPSYS,
> - DW_PCIE_VSEC_DMA_ID);
> + /*
> + * Synopsys and AMD (Xilinx) use the same VSEC ID for the purpose
> + * of map, channel counts, etc.
> + */
> + switch (pdev->vendor) {
> + case PCI_VENDOR_ID_SYNOPSYS:
> + cap = DW_PCIE_VSEC_DMA_ID;
> + break;
> + case PCI_VENDOR_ID_XILINX:
> + cap = DW_PCIE_XILINX_MDB_VSEC_DMA_ID;
> + break;
> + default:
> + return;
> + }
> +
> + vsec = pci_find_vsec_capability(pdev, pdev->vendor, cap);
> if (!vsec)
> return;
>
> @@ -155,6 +244,28 @@ static void dw_edma_pcie_get_vsec_dma_data(struct pci_dev *pdev,
> off <<= 32;
> off |= val;
> pdata->rg.off = off;
> +
> + /* Xilinx specific VSEC capability */
> + vsec = pci_find_vsec_capability(pdev, PCI_VENDOR_ID_XILINX,
> + DW_PCIE_XILINX_MDB_VSEC_ID);
> + if (!vsec)
> + return;
> +
> + pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &val);
> + if (PCI_VNDR_HEADER_ID(val) != DW_PCIE_XILINX_MDB_VSEC_HDR_ID ||
> + PCI_VNDR_HEADER_REV(val) != DW_PCIE_XILINX_MDB_VSEC_REV)
> + return;
> +
> + pci_read_config_dword(pdev,
> + vsec + DW_PCIE_XILINX_MDB_DEVMEM_OFF_REG_HIGH,
> + &val);
> + off = val;
> + pci_read_config_dword(pdev,
> + vsec + DW_PCIE_XILINX_MDB_DEVMEM_OFF_REG_LOW,
> + &val);
> + off <<= 32;
> + off |= val;
> + pdata->devmem_phys_off = off;
> }
>
> static int dw_edma_pcie_probe(struct pci_dev *pdev,
> @@ -179,6 +290,7 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
> }
>
> memcpy(vsec_data, pdata, sizeof(struct dw_edma_pcie_data));
> + vsec_data->devmem_phys_off = DW_PCIE_AMD_MDB_INVALID_ADDR;
>
> /*
> * Tries to find if exists a PCIe Vendor-Specific Extended Capability
> @@ -186,6 +298,26 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
> */
> dw_edma_pcie_get_vsec_dma_data(pdev, vsec_data);
>
> + if (pdev->vendor == PCI_VENDOR_ID_XILINX) {
> + /*
> + * There is no valid address found for the LL memory
> + * space on the device side.
> + */
> + if (vsec_data->devmem_phys_off == DW_PCIE_AMD_MDB_INVALID_ADDR)
> + return -ENOMEM;
> +
> + /*
> + * Configure the channel LL and data blocks if number of
> + * channels enabled in VSEC capability are more than the
> + * channels configured in amd_mdb_data.
> + */
> + dw_edma_set_chan_region_offset(vsec_data, BAR_2, 0,
> + DW_PCIE_XILINX_LL_OFF_GAP,
> + DW_PCIE_XILINX_LL_SIZE,
> + DW_PCIE_XILINX_DT_OFF_GAP,
> + DW_PCIE_XILINX_DT_SIZE);
> + }
> +
> /* Mapping PCI BAR regions */
> mask = BIT(vsec_data->rg.bar);
> for (i = 0; i < vsec_data->wr_ch_cnt; i++) {
> @@ -367,6 +499,8 @@ static void dw_edma_pcie_remove(struct pci_dev *pdev)
>
> static const struct pci_device_id dw_edma_pcie_id_table[] = {
> { PCI_DEVICE_DATA(SYNOPSYS, EDDA, &snps_edda_data) },
> + { PCI_VDEVICE(XILINX, PCI_DEVICE_ID_AMD_MDB_B054),
> + (kernel_ulong_t)&amd_mdb_data },
> { }
> };
> MODULE_DEVICE_TABLE(pci, dw_edma_pcie_id_table);
>