Message-ID: <CAL_JsqLNteS0m_32HuCjY8Mk9Wf+z6=HBpM7Wv=zLVqNs-7Y1Q@mail.gmail.com>
Date: Wed, 23 Apr 2025 21:58:58 -0500
From: Rob Herring <robh@...nel.org>
To: Manikandan Karunakaran Pillai <mpillai@...ence.com>
Cc: "hans.zhang@...tech.com" <hans.zhang@...tech.com>, "bhelgaas@...gle.com" <bhelgaas@...gle.com>,
"lpieralisi@...nel.org" <lpieralisi@...nel.org>, "kw@...ux.com" <kw@...ux.com>,
"manivannan.sadhasivam@...aro.org" <manivannan.sadhasivam@...aro.org>,
"krzk+dt@...nel.org" <krzk+dt@...nel.org>, "conor+dt@...nel.org" <conor+dt@...nel.org>,
"linux-pci@...r.kernel.org" <linux-pci@...r.kernel.org>,
"devicetree@...r.kernel.org" <devicetree@...r.kernel.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH v3 5/6] PCI: cadence: Add callback functions for RP and EP controller
On Sun, Apr 13, 2025 at 10:52 PM Manikandan Karunakaran Pillai
<mpillai@...ence.com> wrote:
>
> >> +void __iomem *cdns_pci_hpa_map_bus(struct pci_bus *bus, unsigned int devfn,
> >> + int where)
> >> +{
> >> + struct pci_host_bridge *bridge = pci_find_host_bridge(bus);
> >> + struct cdns_pcie_rc *rc = pci_host_bridge_priv(bridge);
> >> + struct cdns_pcie *pcie = &rc->pcie;
> >> + unsigned int busn = bus->number;
> >> + u32 addr0, desc0, desc1, ctrl0;
> >> + u32 regval;
> >> +
> >> + if (pci_is_root_bus(bus)) {
> >> + /*
> >> + * Only the root port (devfn == 0) is connected to this bus.
> >> + * All other PCI devices are behind some bridge hence on another
> >> + * bus.
> >> + */
> >> + if (devfn)
> >> + return NULL;
> >> +
> >> + return pcie->reg_base + (where & 0xfff);
> >> + }
> >> +
> >> + /*
> >> + * Clear AXI link-down status
> >> + */
> >
> >That is an odd thing to do in map_bus. Also, it is completely racy
> >because...
> >
> >> + regval = cdns_pcie_hpa_readl(pcie, REG_BANK_AXI_SLAVE, CDNS_PCIE_HPA_AT_LINKDOWN);
> >> + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE, CDNS_PCIE_HPA_AT_LINKDOWN,
> >> + (regval & GENMASK(0, 0)));
> >> +
> >
> >What if the link goes down again here?
> >
> >> + desc1 = 0;
> >> + ctrl0 = 0;
> >> +
> >> + /*
> >> + * Update Output registers for AXI region 0.
> >> + */
> >> + addr0 = CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_NBITS(12) |
> >> + CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) |
> >> + CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_BUS(busn);
> >> + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
> >> + CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0(0), addr0);
> >> +
> >> + desc1 = cdns_pcie_hpa_readl(pcie, REG_BANK_AXI_SLAVE,
> >> + CDNS_PCIE_HPA_AT_OB_REGION_DESC1(0));
> >> + desc1 &= ~CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN_MASK;
> >> + desc1 |= CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN(0);
> >> + ctrl0 = CDNS_PCIE_HPA_AT_OB_REGION_CTRL0_SUPPLY_BUS |
> >> + CDNS_PCIE_HPA_AT_OB_REGION_CTRL0_SUPPLY_DEV_FN;
> >> +
> >> + if (busn == bridge->busnr + 1)
> >> + desc0 |= CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0;
> >> + else
> >> + desc0 |= CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1;
> >> +
> >> + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
> >> + CDNS_PCIE_HPA_AT_OB_REGION_DESC0(0), desc0);
> >> + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
> >> + CDNS_PCIE_HPA_AT_OB_REGION_DESC1(0), desc1);
> >> + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
> >> + CDNS_PCIE_HPA_AT_OB_REGION_CTRL0(0), ctrl0);
> >
> >This is also racy with the read and write functions. Don't worry, lots
> >of other broken h/w like this...
> >
> >Surely this new h/w supports ECAM style config accesses? If so, use
> >and support that mode instead.
> >
>
> As this is an IP-level driver, the ECAM address in the SoC is not available
> here. For a given SoC, the vendor can override this function in their code
> with the ECAM address.
>
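For illustration, with an ECAM-compatible config window map_bus reduces
to pure address arithmetic, so nothing is programmed per access and the
race flagged above goes away. A minimal sketch (hypothetical, not part
of the patch; assumes rc->cfg_base spans the full ECAM range):

  /* ECAM layout: bus[27:20] | devfn[19:12] | register[11:0] */
  static void __iomem *cdns_pci_ecam_map_bus(struct pci_bus *bus,
                                             unsigned int devfn, int where)
  {
          struct pci_host_bridge *bridge = pci_find_host_bridge(bus);
          struct cdns_pcie_rc *rc = pci_host_bridge_priv(bridge);

          return rc->cfg_base + (((u32)bus->number << 20) |
                                 ((u32)devfn << 12) | (where & 0xfff));
  }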
> >> +
> >> + return rc->cfg_base + (where & 0xfff);
> >> +}
> >> +
> >> static struct pci_ops cdns_pcie_host_ops = {
> >> .map_bus = cdns_pci_map_bus,
> >> .read = pci_generic_config_read,
> >> .write = pci_generic_config_write,
> >> };
> >>
> >> +static struct pci_ops cdns_pcie_hpa_host_ops = {
> >> + .map_bus = cdns_pci_hpa_map_bus,
> >> + .read = pci_generic_config_read,
> >> + .write = pci_generic_config_write,
> >> +};
> >> +
> >> static int cdns_pcie_host_training_complete(struct cdns_pcie *pcie)
> >> {
> >> u32 pcie_cap_off = CDNS_PCIE_RP_CAP_OFFSET;
> >> @@ -154,8 +220,14 @@ static void cdns_pcie_host_enable_ptm_response(struct cdns_pcie *pcie)
> >> {
> >> u32 val;
> >>
> >> - val = cdns_pcie_readl(pcie, CDNS_PCIE_LM_PTM_CTRL);
> >> - cdns_pcie_writel(pcie, CDNS_PCIE_LM_PTM_CTRL, val | CDNS_PCIE_LM_TPM_CTRL_PTMRSEN);
> >> + if (!pcie->is_hpa) {
> >> + val = cdns_pcie_readl(pcie, CDNS_PCIE_LM_PTM_CTRL);
> >> + cdns_pcie_writel(pcie, CDNS_PCIE_LM_PTM_CTRL, val | CDNS_PCIE_LM_TPM_CTRL_PTMRSEN);
> >> + } else {
> >> + val = cdns_pcie_hpa_readl(pcie, REG_BANK_IP_REG, CDNS_PCIE_HPA_LM_PTM_CTRL);
> >> + cdns_pcie_hpa_writel(pcie, REG_BANK_IP_REG, CDNS_PCIE_HPA_LM_PTM_CTRL,
> >> + val | CDNS_PCIE_HPA_LM_TPM_CTRL_PTMRSEN);
> >> + }
> >> }
> >>
> >> static int cdns_pcie_host_start_link(struct cdns_pcie_rc *rc)
> >> @@ -340,8 +412,8 @@ static int cdns_pcie_host_bar_config(struct cdns_pcie_rc *rc,
> >> */
> >> bar = cdns_pcie_host_find_min_bar(rc, size);
> >> if (bar != RP_BAR_UNDEFINED) {
> >> - ret = cdns_pcie_host_bar_ib_config(rc, bar, cpu_addr,
> >> - size, flags);
> >> + ret = pcie->ops->host_bar_ib_config(rc, bar, cpu_addr,
> >> + size, flags);
> >> if (ret)
> >> dev_err(dev, "IB BAR: %d config failed\n", bar);
> >> return ret;
> >> @@ -366,8 +438,7 @@ static int cdns_pcie_host_bar_config(struct cdns_pcie_rc *rc,
> >> }
> >>
> >> winsize = bar_max_size[bar];
> >> - ret = cdns_pcie_host_bar_ib_config(rc, bar, cpu_addr, winsize,
> >> - flags);
> >> + ret = pcie->ops->host_bar_ib_config(rc, bar, cpu_addr, winsize, flags);
> >> if (ret) {
> >> dev_err(dev, "IB BAR: %d config failed\n", bar);
> >> return ret;
> >> @@ -408,8 +479,8 @@ static int cdns_pcie_host_map_dma_ranges(struct cdns_pcie_rc *rc)
> >> if (list_empty(&bridge->dma_ranges)) {
> >> of_property_read_u32(np, "cdns,no-bar-match-nbits",
> >> &no_bar_nbits);
> >> - err = cdns_pcie_host_bar_ib_config(rc, RP_NO_BAR, 0x0,
> >> - (u64)1 << no_bar_nbits, 0);
> >> + err = pcie->ops->host_bar_ib_config(rc, RP_NO_BAR, 0x0,
> >> + (u64)1 << no_bar_nbits, 0);
> >> if (err)
> >> + dev_err(dev, "IB BAR: %d config failed\n", RP_NO_BAR);
> >> return err;
> >> @@ -467,17 +538,159 @@ int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
> >> u64 pci_addr = res->start - entry->offset;
> >>
> >> if (resource_type(res) == IORESOURCE_IO)
> >> - cdns_pcie_set_outbound_region(pcie, busnr, 0, r,
> >> - true,
> >> - pci_pio_to_address(res->start),
> >> - pci_addr,
> >> - resource_size(res));
> >> + pcie->ops->set_outbound_region(pcie, busnr, 0, r,
> >> + true,
> >> + pci_pio_to_address(res->start),
> >> + pci_addr,
> >> + resource_size(res));
> >> + else
> >> + pcie->ops->set_outbound_region(pcie, busnr, 0, r,
> >> + false,
> >> + res->start,
> >> + pci_addr,
> >> + resource_size(res));
> >> +
> >> + r++;
> >> + }
> >> +
> >> + return cdns_pcie_host_map_dma_ranges(rc);
> >> +}
> >> +
> >> +int cdns_pcie_hpa_host_init_root_port(struct cdns_pcie_rc *rc)
> >> +{
> >> + struct cdns_pcie *pcie = &rc->pcie;
> >> + u32 value, ctrl;
> >> +
> >> + /*
> >> + * Set the root complex BAR configuration register:
> >> + * - disable both BAR0 and BAR1.
> >> + * - enable Prefetchable Memory Base and Limit registers in type 1
> >> + * config space (64 bits).
> >> + * - enable IO Base and Limit registers in type 1 config
> >> + * space (32 bits).
> >> + */
> >> +
> >> + ctrl = CDNS_PCIE_HPA_LM_BAR_CFG_CTRL_DISABLED;
> >> + value = CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR0_CTRL(ctrl) |
> >> + CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR1_CTRL(ctrl) |
> >> + CDNS_PCIE_HPA_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE |
> >> + CDNS_PCIE_HPA_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS |
> >> + CDNS_PCIE_HPA_LM_RC_BAR_CFG_IO_ENABLE |
> >> + CDNS_PCIE_HPA_LM_RC_BAR_CFG_IO_32BITS;
> >> + cdns_pcie_hpa_writel(pcie, REG_BANK_IP_CFG_CTRL_REG,
> >> + CDNS_PCIE_HPA_LM_RC_BAR_CFG, value);
> >> +
> >> + if (rc->vendor_id != 0xffff)
> >> + cdns_pcie_rp_writew(pcie, PCI_VENDOR_ID, rc->vendor_id);
> >> +
> >> + if (rc->device_id != 0xffff)
> >> + cdns_pcie_rp_writew(pcie, PCI_DEVICE_ID, rc->device_id);
> >> +
> >> + cdns_pcie_rp_writeb(pcie, PCI_CLASS_REVISION, 0);
> >> + cdns_pcie_rp_writeb(pcie, PCI_CLASS_PROG, 0);
> >> + cdns_pcie_rp_writew(pcie, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);
> >> +
> >> + return 0;
> >> +}
> >> +
> >> +int cdns_pcie_hpa_host_bar_ib_config(struct cdns_pcie_rc *rc,
> >> + enum cdns_pcie_rp_bar bar,
> >> + u64 cpu_addr, u64 size,
> >> + unsigned long flags)
> >> +{
> >> + struct cdns_pcie *pcie = &rc->pcie;
> >> + u32 addr0, addr1, aperture, value;
> >> +
> >> + if (!rc->avail_ib_bar[bar])
> >> + return -EBUSY;
> >> +
> >> + rc->avail_ib_bar[bar] = false;
> >> +
> >> + aperture = ilog2(size);
> >> + addr0 = CDNS_PCIE_HPA_AT_IB_RP_BAR_ADDR0_NBITS(aperture) |
> >> + (lower_32_bits(cpu_addr) & GENMASK(31, 8));
> >> + addr1 = upper_32_bits(cpu_addr);
> >> + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_MASTER,
> >> + CDNS_PCIE_HPA_AT_IB_RP_BAR_ADDR0(bar), addr0);
> >> + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_MASTER,
> >> + CDNS_PCIE_HPA_AT_IB_RP_BAR_ADDR1(bar), addr1);
> >> +
> >> + if (bar == RP_NO_BAR)
> >> + return 0;
> >> +
> >> + value = cdns_pcie_hpa_readl(pcie, REG_BANK_IP_CFG_CTRL_REG, CDNS_PCIE_HPA_LM_RC_BAR_CFG);
> >> + value &= ~(HPA_LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) |
> >> + HPA_LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) |
> >> + HPA_LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) |
> >> + HPA_LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) |
> >> + HPA_LM_RC_BAR_CFG_APERTURE(bar, bar_aperture_mask[bar] + 2));
> >> + if (size + cpu_addr >= SZ_4G) {
> >> + if (!(flags & IORESOURCE_PREFETCH))
> >> + value |= HPA_LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar);
> >> + value |= HPA_LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar);
> >> + } else {
> >> + if (!(flags & IORESOURCE_PREFETCH))
> >> + value |= HPA_LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar);
> >> + value |= HPA_LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar);
> >> + }
> >> +
> >> + value |= HPA_LM_RC_BAR_CFG_APERTURE(bar, aperture);
> >> + cdns_pcie_hpa_writel(pcie, REG_BANK_IP_CFG_CTRL_REG, CDNS_PCIE_HPA_LM_RC_BAR_CFG, value);
> >> +
> >> + return 0;
> >> +}
> >> +
> >> +int cdns_pcie_hpa_host_init_address_translation(struct cdns_pcie_rc *rc)
> >> +{
> >> + struct cdns_pcie *pcie = &rc->pcie;
> >> + struct pci_host_bridge *bridge = pci_host_bridge_from_priv(rc);
> >> + struct resource *cfg_res = rc->cfg_res;
> >> + struct resource_entry *entry;
> >> + u64 cpu_addr = cfg_res->start;
> >> + u32 addr0, addr1, desc1;
> >> + int r, busnr = 0;
> >> +
> >> + entry = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
> >> + if (entry)
> >> + busnr = entry->res->start;
> >> +
> >> + /*
> >> + * Reserve region 0 for PCI configure space accesses:
> >> + * OB_REGION_PCI_ADDR0 and OB_REGION_DESC0 are updated dynamically by
> >> + * cdns_pci_map_bus(), other region registers are set here once for all.
> >> + */
> >> + addr1 = 0;
> >> + desc1 = CDNS_PCIE_HPA_AT_OB_REGION_DESC1_BUS(busnr);
> >> + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
> >> + CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR1(0), addr1);
> >> + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
> >> + CDNS_PCIE_HPA_AT_OB_REGION_DESC1(0), desc1);
> >> +
> >> + addr0 = CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR0_NBITS(12) |
> >> + (lower_32_bits(cpu_addr) & GENMASK(31, 8));
> >> + addr1 = upper_32_bits(cpu_addr);
> >> + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
> >> + CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR0(0), addr0);
> >> + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
> >> + CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR1(0), addr1);
> >> +
> >> + r = 1;
> >> + resource_list_for_each_entry(entry, &bridge->windows) {
> >> + struct resource *res = entry->res;
> >> + u64 pci_addr = res->start - entry->offset;
> >> +
> >> + if (resource_type(res) == IORESOURCE_IO)
> >> + pcie->ops->set_outbound_region(pcie, busnr, 0, r,
> >> + true,
> >> + pci_pio_to_address(res->start),
> >> + pci_addr,
> >> + resource_size(res));
> >> else
> >> - cdns_pcie_set_outbound_region(pcie, busnr, 0, r,
> >> - false,
> >> - res->start,
> >> - pci_addr,
> >> - resource_size(res));
> >> + pcie->ops->set_outbound_region(pcie, busnr, 0, r,
> >> + false,
> >> + res->start,
> >> + pci_addr,
> >> + resource_size(res));
> >>
> >> r++;
> >> }
> >> @@ -489,11 +702,11 @@ int cdns_pcie_host_init(struct cdns_pcie_rc *rc)
> >> {
> >> int err;
> >>
> >> - err = cdns_pcie_host_init_root_port(rc);
> >> + err = rc->pcie.ops->host_init_root_port(rc);
> >> if (err)
> >> return err;
> >>
> >> - return cdns_pcie_host_init_address_translation(rc);
> >> + return rc->pcie.ops->host_init_address_translation(rc);
> >> }
> >>
> >> int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc)
> >> @@ -503,7 +716,7 @@ int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc)
> >> int ret;
> >>
> >> if (rc->quirk_detect_quiet_flag)
> >> - cdns_pcie_detect_quiet_min_delay_set(&rc->pcie);
> >> + pcie->ops->detect_quiet_min_delay_set(&rc->pcie);
> >>
> >> cdns_pcie_host_enable_ptm_response(pcie);
> >>
> >> @@ -566,8 +779,12 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
> >> if (ret)
> >> return ret;
> >>
> >> - if (!bridge->ops)
> >> - bridge->ops = &cdns_pcie_host_ops;
> >> + if (!bridge->ops) {
> >> + if (pcie->is_hpa)
> >> + bridge->ops = &cdns_pcie_hpa_host_ops;
> >> + else
> >> + bridge->ops = &cdns_pcie_host_ops;
> >> + }
> >>
> >> ret = pci_host_probe(bridge);
> >> if (ret < 0)
> >> diff --git a/drivers/pci/controller/cadence/pcie-cadence-plat.c b/drivers/pci/controller/cadence/pcie-cadence-plat.c
> >> index b24176d4df1f..8d5fbaef0a3c 100644
> >> --- a/drivers/pci/controller/cadence/pcie-cadence-plat.c
> >> +++ b/drivers/pci/controller/cadence/pcie-cadence-plat.c
> >> @@ -43,7 +43,30 @@ static u64 cdns_plat_cpu_addr_fixup(struct cdns_pcie *pcie, u64 cpu_addr)
> >> }
> >>
> >> static const struct cdns_pcie_ops cdns_plat_ops = {
> >> + .link_up = cdns_pcie_linkup,
> >> .cpu_addr_fixup = cdns_plat_cpu_addr_fixup,
> >> + .host_init_root_port = cdns_pcie_host_init_root_port,
> >> + .host_bar_ib_config = cdns_pcie_host_bar_ib_config,
> >> + .host_init_address_translation = cdns_pcie_host_init_address_translation,
> >> + .detect_quiet_min_delay_set = cdns_pcie_detect_quiet_min_delay_set,
> >> + .set_outbound_region = cdns_pcie_set_outbound_region,
> >> + .set_outbound_region_for_normal_msg = cdns_pcie_set_outbound_region_for_normal_msg,
> >> + .reset_outbound_region = cdns_pcie_reset_outbound_region,
> >> +};
> >> +
> >> +static const struct cdns_pcie_ops cdns_hpa_plat_ops = {
> >> + .start_link = cdns_pcie_hpa_startlink,
> >> + .stop_link = cdns_pcie_hpa_stop_link,
> >> + .link_up = cdns_pcie_hpa_linkup,
> >> + .host_init_root_port = cdns_pcie_hpa_host_init_root_port,
> >> + .host_bar_ib_config = cdns_pcie_hpa_host_bar_ib_config,
> >> + .host_init_address_translation = cdns_pcie_hpa_host_init_address_translation,
> >> + .detect_quiet_min_delay_set = cdns_pcie_hpa_detect_quiet_min_delay_set,
> >> + .set_outbound_region = cdns_pcie_hpa_set_outbound_region,
> >> + .set_outbound_region_for_normal_msg = cdns_pcie_hpa_set_outbound_region_for_normal_msg,
> >> + .reset_outbound_region = cdns_pcie_hpa_reset_outbound_region,
> >
> >What exactly is shared between these 2 implementations? Link handling,
> >config space accesses, address translation, and host init are all
> >different. What's left to share? MSIs (if not passed thru) and
> >interrupts? I think it's questionable that this should be the same driver.
> >
> The addresses of both of these have changed because the controller
> architecture has changed. If these have to be the same driver, there will be
> a lot of sprinkled "if (is_hpa)" checks, and that was already rejected in an
> earlier version of the code.
I'm saying they should *not* be the same driver because you hardly
share anything. Again, what is really common here?
> Hence it was done, as in other drivers, with architecture-specific "ops".
Yes, and IMO driver private/custom ops is the wrong direction to go.
Read my prior reply below again.
> The "if(is_hpa)" is now very limited where a specific ops functions does not make
> any sense.
But you still have them. In a separate driver, you would have 0.
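To make that concrete: the PTM helper quoted above is one of the
remaining branches. In two separate drivers it would be two
straight-line functions with no test at all (a sketch reusing the
register names from the patch; the HPA file name is hypothetical):

  /* pcie-cadence-host.c: legacy controller only */
  static void cdns_pcie_host_enable_ptm_response(struct cdns_pcie *pcie)
  {
          u32 val;

          val = cdns_pcie_readl(pcie, CDNS_PCIE_LM_PTM_CTRL);
          cdns_pcie_writel(pcie, CDNS_PCIE_LM_PTM_CTRL,
                           val | CDNS_PCIE_LM_TPM_CTRL_PTMRSEN);
  }

  /* pcie-cadence-hpa-host.c: HPA controller only */
  static void cdns_pcie_hpa_host_enable_ptm_response(struct cdns_pcie *pcie)
  {
          u32 val;

          val = cdns_pcie_hpa_readl(pcie, REG_BANK_IP_REG,
                                    CDNS_PCIE_HPA_LM_PTM_CTRL);
          cdns_pcie_hpa_writel(pcie, REG_BANK_IP_REG,
                               CDNS_PCIE_HPA_LM_PTM_CTRL,
                               val | CDNS_PCIE_HPA_LM_TPM_CTRL_PTMRSEN);
  }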
> >A bunch of driver specific 'ops' is not the right direction despite
> >other drivers (DWC) having that. If there are common parts, then make
> >them library functions multiple drivers can call.
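The library shape would look roughly like this (a sketch with
hypothetical names): genuinely controller-independent logic is exported
once, and each driver calls it plus its own register-level code
directly, with no ops indirection:

  /* shared library code, callable by any Cadence-based driver */
  enum cdns_pcie_rp_bar cdns_pcie_host_find_min_bar(struct cdns_pcie_rc *rc,
                                                    u64 size);

  /* in a separate HPA driver */
  static int cdns_pcie_hpa_host_bar_config(struct cdns_pcie_rc *rc,
                                           u64 cpu_addr, u64 size,
                                           unsigned long flags)
  {
          enum cdns_pcie_rp_bar bar;

          bar = cdns_pcie_host_find_min_bar(rc, size);
          if (bar == RP_BAR_UNDEFINED)
                  return -ENOMEM;

          return cdns_pcie_hpa_host_bar_ib_config(rc, bar, cpu_addr,
                                                  size, flags);
  }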