Message-ID: <ed6d811e-608e-e6aa-7b07-2f2d2d68adbd@ti.com>
Date: Tue, 5 Dec 2017 14:49:52 +0530
From: Kishon Vijay Abraham I <kishon@...com>
To: Lorenzo Pieralisi <lorenzo.pieralisi@....com>,
Cyrille Pitchen <cyrille.pitchen@...e-electrons.com>
CC: <bhelgaas@...gle.com>, <linux-pci@...r.kernel.org>,
<adouglas@...ence.com>, <stelford@...ence.com>,
<dgary@...ence.com>, <kgopi@...ence.com>, <eandrews@...ence.com>,
<thomas.petazzoni@...e-electrons.com>, <sureshp@...ence.com>,
<nsekhar@...com>, <linux-kernel@...r.kernel.org>,
<robh@...nel.org>, <devicetree@...r.kernel.org>
Subject: Re: [PATCH 5/5] PCI: cadence: add EndPoint Controller driver for
Cadence PCIe controller
Hi,
On Friday 01 December 2017 05:50 PM, Lorenzo Pieralisi wrote:
> On Thu, Nov 23, 2017 at 04:01:50PM +0100, Cyrille Pitchen wrote:
>> This patch adds support to the Cadence PCIe controller in endpoint mode.
>
> Please add a brief description to the log to describe the most salient
> features.
>
>> Signed-off-by: Cyrille Pitchen <cyrille.pitchen@...e-electrons.com>
>> ---
>> drivers/pci/cadence/Kconfig | 9 +
>> drivers/pci/cadence/Makefile | 1 +
>> drivers/pci/cadence/pcie-cadence-ep.c | 553 ++++++++++++++++++++++++++++++++++
>> 3 files changed, 563 insertions(+)
>> create mode 100644 drivers/pci/cadence/pcie-cadence-ep.c
>>
>> diff --git a/drivers/pci/cadence/Kconfig b/drivers/pci/cadence/Kconfig
>> index 120306cae2aa..b2e6af71f39e 100644
>> --- a/drivers/pci/cadence/Kconfig
>> +++ b/drivers/pci/cadence/Kconfig
>> @@ -21,4 +21,13 @@ config PCIE_CADENCE_HOST
>> mode. This PCIe controller may be embedded into many different vendors
>> SoCs.
>>
>> +config PCIE_CADENCE_EP
>> + bool "Cadence PCIe endpoint controller"
>> + depends on PCI_ENDPOINT
>> + select PCIE_CADENCE
>> + help
>> + Say Y here if you want to support the Cadence PCIe controller in
>> + endpoint mode. This PCIe controller may be embedded into many
>> + different vendors SoCs.
>> +
>> endif # PCI_CADENCE
>> diff --git a/drivers/pci/cadence/Makefile b/drivers/pci/cadence/Makefile
>> index d57d192d2595..61e9c8d6839d 100644
>> --- a/drivers/pci/cadence/Makefile
>> +++ b/drivers/pci/cadence/Makefile
>> @@ -1,2 +1,3 @@
>> obj-$(CONFIG_PCIE_CADENCE) += pcie-cadence.o
>> obj-$(CONFIG_PCIE_CADENCE_HOST) += pcie-cadence-host.o
>> +obj-$(CONFIG_PCIE_CADENCE_EP) += pcie-cadence-ep.o
>> diff --git a/drivers/pci/cadence/pcie-cadence-ep.c b/drivers/pci/cadence/pcie-cadence-ep.c
>> new file mode 100644
>> index 000000000000..a1d761101a9c
>> --- /dev/null
>> +++ b/drivers/pci/cadence/pcie-cadence-ep.c
>> @@ -0,0 +1,553 @@
>> +/*
>> + * Cadence PCIe host controller driver.
>
> You should update this comment.
>
>> + *
>> + * Copyright (c) 2017 Cadence
>> + *
>> + * Author: Cyrille Pitchen <cyrille.pitchen@...e-electrons.com>
>> + *
>> + * This program is free software; you can redistribute it and/or modify
>> + * it under the terms of the GNU General Public License version 2 as
>> + * published by the Free Software Foundation.
>> + *
>> + * This program is distributed in the hope that it will be useful,
>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
>> + * GNU General Public License for more details.
>> + *
>> + * You should have received a copy of the GNU General Public License along with
>> + * this program. If not, see <http://www.gnu.org/licenses/>.
>> + */
>> +#include <linux/kernel.h>
>> +#include <linux/of.h>
>> +#include <linux/pci-epc.h>
>> +#include <linux/platform_device.h>
>> +#include <linux/pm_runtime.h>
>> +#include <linux/sizes.h>
>> +#include <linux/delay.h>
>
> Nit: alphabetical order.
>
>> +#include "pcie-cadence.h"
>> +
>> +#define CDNS_PCIE_EP_MIN_APERTURE 128 /* 128 bytes */
>> +#define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE 0x1
>> +#define CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY 0x3
>> +
>> +/**
>> + * struct cdns_pcie_ep_data - hardware specific data
>> + * @max_regions: maximum nmuber of regions supported by hardware
>
> s/nmuber/number
>
>> + */
>> +struct cdns_pcie_ep_data {
>> + size_t max_regions;
>> +};
>> +
>> +/**
>> + * struct cdns_pcie_ep - private data for this PCIe endpoint controller driver
>> + * @pcie: Cadence PCIe controller
>> + * @data: pointer to a 'struct cdns_pcie_data'
>> + */
>> +struct cdns_pcie_ep {
>> + struct cdns_pcie pcie;
>> + const struct cdns_pcie_ep_data *data;
>> + struct pci_epc *epc;
>> + unsigned long ob_region_map;
>> + phys_addr_t *ob_addr;
>> + phys_addr_t irq_phys_addr;
>> + void __iomem *irq_cpu_addr;
>> + u64 irq_pci_addr;
>> + u8 irq_pending;
>> +};
>> +
>> +static int cdns_pcie_ep_write_header(struct pci_epc *epc,
>> + struct pci_epf_header *hdr)
>> +{
>> + struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
>> + struct cdns_pcie *pcie = &ep->pcie;
>> + u8 fn = 0;
>> +
>> + if (fn == 0) {
>
> I think there is some code to retrieve fn missing here.
Hmm, the endpoint core has to send the function number, which it is not
doing right now even though it has the function number info in pci_epf.
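Something along these lines is what I'd expect once the core passes the
function number down (only a sketch; the extra "fn" parameter in the epc op
is an assumption, it does not exist in the current API):

	/* Sketch: function number supplied by the endpoint core (assumed). */
	static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
					     struct pci_epf_header *hdr)
	{
		struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
		struct cdns_pcie *pcie = &ep->pcie;

		/* The vendor/subsystem vendor IDs live in a function 0 only register. */
		if (fn == 0) {
			u32 id = CDNS_PCIE_LM_ID_VENDOR(hdr->vendorid) |
				 CDNS_PCIE_LM_ID_SUBSYS(hdr->subsys_vendor_id);

			cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);
		}

		cdns_pcie_ep_fn_writew(pcie, fn, PCI_DEVICE_ID, hdr->deviceid);
		/* ... rest of the writes unchanged, they already take fn ... */

		return 0;
	}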
>
>> + u32 id = CDNS_PCIE_LM_ID_VENDOR(hdr->vendorid) |
>> + CDNS_PCIE_LM_ID_SUBSYS(hdr->subsys_vendor_id);
>> + cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);
>> + }
>> + cdns_pcie_ep_fn_writew(pcie, fn, PCI_DEVICE_ID, hdr->deviceid);
>> + cdns_pcie_ep_fn_writeb(pcie, fn, PCI_REVISION_ID, hdr->revid);
>> + cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CLASS_PROG, hdr->progif_code);
>> + cdns_pcie_ep_fn_writew(pcie, fn, PCI_CLASS_DEVICE,
>> + hdr->subclass_code | hdr->baseclass_code << 8);
>> + cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CACHE_LINE_SIZE,
>> + hdr->cache_line_size);
>> + cdns_pcie_ep_fn_writew(pcie, fn, PCI_SUBSYSTEM_ID, hdr->subsys_id);
>> + cdns_pcie_ep_fn_writeb(pcie, fn, PCI_INTERRUPT_PIN, hdr->interrupt_pin);
>> +
>> + return 0;
>> +}
>> +
>> +static int cdns_pcie_ep_set_bar(struct pci_epc *epc, enum pci_barno bar,
>> + dma_addr_t bar_phys, size_t size, int flags)
>> +{
>> + struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
>> + struct cdns_pcie *pcie = &ep->pcie;
>> + u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
>> + u8 fn = 0;
Here too the endpoint core should send the function number.
>> + u64 sz;
>> +
>> + /* BAR size is 2^(aperture + 7) */
>> + sz = max_t(size_t, size, CDNS_PCIE_EP_MIN_APERTURE);
>> + sz = 1ULL << fls64(sz - 1);
>> + aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */
>> +
>> + if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
>> + ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS;
>> + } else {
>> + bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
>> + bool is_64bits = sz > SZ_2G;
>> +
>> + if (is_64bits && (bar & 1))
>> + return -EINVAL;
>> +
>> + switch (is_64bits << 1 | is_prefetch) {
>
> I would not mind implementing this as a nested if-else, I am not a big
> fan of using bool this way.
>
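FWIW the nested if-else version could look like this (untested sketch, same
register definitions as in the patch):

	if (is_64bits) {
		if (is_prefetch)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
		else
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS;
	} else {
		if (is_prefetch)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS;
		else
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS;
	}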
>> + case 0:
>> + ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS;
>> + break;
>> +
>> + case 1:
>> + ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS;
>> + break;
>> +
>> + case 2:
>> + ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS;
>> + break;
>> +
>> + case 3:
>> + ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
>> + break;
>> + }
>> + }
>> +
>> + addr0 = lower_32_bits(bar_phys);
>> + addr1 = upper_32_bits(bar_phys);
>> + cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar),
>> + addr0);
It would be nice if the defines for CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0 were
included in this patch rather than in "PCI: cadence: Add host driver for
Cadence PCIe controller". All the EP-specific definitions in the header file
should be added here.
>> + cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar),
>> + addr1);
>
> Is fn always 0 ?
>
>> + if (bar < BAR_4) {
>> + reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
>> + b = bar;
>> + } else {
>> + reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
>> + b = bar - BAR_4;
>> + }
>> +
>> + cfg = cdns_pcie_readl(pcie, reg);
>> + cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
>> + CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
>> + cfg |= (CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
>> + CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
>> + cdns_pcie_writel(pcie, reg, cfg);
>> +
>> + return 0;
>> +}
>> +
>> +static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, enum pci_barno bar)
>> +{
>> + struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
>> + struct cdns_pcie *pcie = &ep->pcie;
>> + u32 reg, cfg, b, ctrl;
>> + u8 fn = 0;
Here too the endpoint core should send the function number.
>> +
>> + if (bar < BAR_4) {
>> + reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
>> + b = bar;
>> + } else {
>> + reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
>> + b = bar - BAR_4;
>> + }
>> +
>> + ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
>> + cfg = cdns_pcie_readl(pcie, reg);
>> + cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
>> + CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
>> + cfg |= CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(ctrl);
>> + cdns_pcie_writel(pcie, reg, cfg);
>> +
>> + cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar), 0);
>> + cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar), 0);
>> +}
>> +
>> +static int cdns_pcie_ep_map_addr(struct pci_epc *epc, phys_addr_t addr,
>> + u64 pci_addr, size_t size)
>> +{
>> + struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
>> + struct cdns_pcie *pcie = &ep->pcie;
>> + u32 r;
>> +
>> + r = find_first_zero_bit(&ep->ob_region_map, sizeof(ep->ob_region_map));
>
> Second argument must be in bits not bytes.
>
> https://marc.info/?l=linux-pci&m=151179781225513&w=2
>
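i.e. something like this (sketch, assuming BITS_PER_BYTE from
<linux/bitops.h>):

	/* The second argument is a number of bits, not bytes. */
	r = find_first_zero_bit(&ep->ob_region_map,
				sizeof(ep->ob_region_map) * BITS_PER_BYTE);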
>> + if (r >= ep->data->max_regions - 1) {
>> + dev_err(&epc->dev, "no free outbound region\n");
>> + return -EINVAL;
>> + }
>> +
>> + cdns_pcie_set_outbound_region(pcie, r, false, addr, pci_addr, size);
>> +
>> + set_bit(r, &ep->ob_region_map);
>> + ep->ob_addr[r] = addr;
>> +
>> + return 0;
>> +}
>> +
>> +static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, phys_addr_t addr)
>> +{
>> + struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
>> + struct cdns_pcie *pcie = &ep->pcie;
>> + u32 r;
>> +
>> + for (r = 0; r < ep->data->max_regions - 1; r++)
>> + if (ep->ob_addr[r] == addr)
>> + break;
>> +
>> + if (r >= ep->data->max_regions - 1)
>
> == ?
>
>> + return;
>> +
>> + cdns_pcie_reset_outbound_region(pcie, r);
>> +
>> + ep->ob_addr[r] = 0;
>> + clear_bit(r, &ep->ob_region_map);
>> +}
>> +
>> +static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 mmc)
>> +{
>> + struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
>> + struct cdns_pcie *pcie = &ep->pcie;
>> + u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
>> + u16 flags;
>> + u8 fn = 0;
>> +
>> + /* Validate the ID of the MSI Capability structure. */
>> + if (cdns_pcie_ep_fn_readb(pcie, fn, cap) != PCI_CAP_ID_MSI)
>> + return -EINVAL;
>> +
>> + /*
>> + * Set the Multiple Message Capable bitfield into the Message Control
>> + * register.
>> + */
>> + flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
>> + flags = (flags & ~PCI_MSI_FLAGS_QMASK) | (mmc << 1);
>> + flags |= PCI_MSI_FLAGS_64BIT;
>> + flags &= ~PCI_MSI_FLAGS_MASKBIT;
Any reason why the "Per-vector masking capable" bit is cleared?
>> + cdns_pcie_ep_fn_writew(pcie, fn, cap + PCI_MSI_FLAGS, flags);
>> +
>> + return 0;
>> +}
>> +
>> +static int cdns_pcie_ep_get_msi(struct pci_epc *epc)
>> +{
>> + struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
>> + struct cdns_pcie *pcie = &ep->pcie;
>> + u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
>> + u16 flags, mmc, mme;
>> + u8 fn = 0;
>> +
>> + /* Validate the ID of the MSI Capability structure. */
>> + if (cdns_pcie_ep_fn_readb(pcie, fn, cap) != PCI_CAP_ID_MSI)
>> + return -EINVAL;
>> +
>> + /* Validate that the MSI feature is actually enabled. */
>> + flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
>> + if (!(flags & PCI_MSI_FLAGS_ENABLE))
>> + return -EINVAL;
>> +
>> + /*
>> + * Get the Multiple Message Enable bitfield from the Message Control
>> + * register.
>> + */
>> + mmc = (flags & PCI_MSI_FLAGS_QMASK) >> 1;
>> + mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
>> + if (mme > mmc)
>> + mme = mmc;
>> + if (mme > 5)
>> + mme = 5;
I'm not sure both of the above checks are required.
>
> You should comment on what this 5 means and why it is fine to cap mme.
>
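Something like this could address the comment request (sketch, the wording
of the comment is mine, please double-check it against the IP documentation):

	/*
	 * Multiple Message Enable is a log2 encoding of the number of
	 * vectors; 5 means 32 vectors, the maximum the MSI capability
	 * allows, so cap mme there.
	 */
	if (mme > 5)
		mme = 5;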
>> +
>> + return mme;
>> +}
>> +
>> +static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn,
>> + u8 intx, bool is_asserted)
>> +{
>> + struct cdns_pcie *pcie = &ep->pcie;
>> + u32 r = ep->data->max_regions - 1;
>> + u32 offset;
>> + u16 status;
>> + u8 msg_code;
>> +
>> + intx &= 3;
>> +
>> + /* Set the outbound region if needed. */
>> + if (unlikely(ep->irq_pci_addr != CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY)) {
>> + /* Last region was reserved for IRQ writes. */
>> + cdns_pcie_set_outbound_region_for_normal_msg(pcie, r,
>> + ep->irq_phys_addr);
>> + ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY;
>> + }
>> +
>> + if (is_asserted) {
>> + ep->irq_pending |= BIT(intx);
>> + msg_code = MSG_CODE_ASSERT_INTA + intx;
>> + } else {
>> + ep->irq_pending &= ~BIT(intx);
>> + msg_code = MSG_CODE_DEASSERT_INTA + intx;
>> + }
>> +
>> + status = cdns_pcie_ep_fn_readw(pcie, fn, PCI_STATUS);
>> + if (((status & PCI_STATUS_INTERRUPT) != 0) ^ (ep->irq_pending != 0)) {
>> + status ^= PCI_STATUS_INTERRUPT;
>> + cdns_pcie_ep_fn_writew(pcie, fn, PCI_STATUS, status);
>> + }
Here you are setting PCI_STATUS_INTERRUPT even before sending the ASSERT
message.
>> +
>> + offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) |
>> + CDNS_PCIE_NORMAL_MSG_CODE(msg_code) |
>> + CDNS_PCIE_MSG_NO_DATA;
>> + writel(0, ep->irq_cpu_addr + offset);
>> +}
>> +
>> +static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 intx)
>> +{
>> + u16 cmd;
>> +
>> + cmd = cdns_pcie_ep_fn_readw(&ep->pcie, fn, PCI_COMMAND);
>> + if (cmd & PCI_COMMAND_INTX_DISABLE)
>> + return -EINVAL;
>> +
>> + cdns_pcie_ep_assert_intx(ep, fn, intx, true);
>> + mdelay(1);
>
> Add a comment please to explain the mdelay value.
>
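For example (sketch only, the real justification for the 1 ms value has to
come from you / the Cadence documentation):

	/* Keep INTx asserted long enough for the host to observe it. */
	mdelay(1);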
>> + cdns_pcie_ep_assert_intx(ep, fn, intx, false);
>> + return 0;
>> +}
>> +
>> +static int cdns_pcie_ep_raise_irq(struct pci_epc *epc,
>> + enum pci_epc_irq_type type, u8 interrupt_num)
>> +{
>> + struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
>> + struct cdns_pcie *pcie = &ep->pcie;
>> + u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
>> + u16 flags, mmc, mme, data, data_mask;
>> + u8 msi_count;
>> + u64 pci_addr, pci_addr_mask = 0xff;
>> + u8 fn = 0;
>> +
>> + /* Handle legacy IRQ. */
>> + if (type == PCI_EPC_IRQ_LEGACY)
>> + return cdns_pcie_ep_send_legacy_irq(ep, fn, 0);
>> +
>> + /* Otherwise MSI. */
>> + if (type != PCI_EPC_IRQ_MSI)
>> + return -EINVAL;
MSI-X?
>> +
>> + /* Check whether the MSI feature has been enabled by the PCI host. */
>> + flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
>> + if (!(flags & PCI_MSI_FLAGS_ENABLE))
>> + return -EINVAL;
>> +
>> + /* Get the number of enabled MSIs */
>> + mmc = (flags & PCI_MSI_FLAGS_QMASK) >> 1;
>> + mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
>> + if (mme > mmc)
>> + mme = mmc;
>> + if (mme > 5)
>> + mme = 5;
>
> Same comment as above.
>
>> + msi_count = 1 << mme;
>> + if (!interrupt_num || interrupt_num > msi_count)
>> + return -EINVAL;
>> +
>> + /* Compute the data value to be written. */
>> + data_mask = msi_count - 1;
>> + data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64);
>> + data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask);
>> +
>> + /* Get the PCI address where to write the data into. */
>> + pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI);
>> + pci_addr <<= 32;
>> + pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO);
>> + pci_addr &= GENMASK_ULL(63, 2);
>> +
>> + /* Set the outbound region if needed. */
>> + if (unlikely(ep->irq_pci_addr != pci_addr)) {
>> + /* Last region was reserved for IRQ writes. */
>> + cdns_pcie_set_outbound_region(pcie, ep->data->max_regions - 1,
>> + false,
>> + ep->irq_phys_addr,
>> + pci_addr & ~pci_addr_mask,
>> + pci_addr_mask + 1);
>> + ep->irq_pci_addr = pci_addr;
>> + }
>> + writew(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask));
>> +
>> + return 0;
>> +}
>> +
>> +static int cdns_pcie_ep_start(struct pci_epc *epc)
>> +{
>> + struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
>> + struct cdns_pcie *pcie = &ep->pcie;
>> + struct pci_epf *epf;
>> + u32 cfg;
>> + u8 fn = 0;
>> +
>> + /* Enable this endpoint function. */
>> + cfg = cdns_pcie_readl(pcie, CDNS_PCIE_LM_EP_FUNC_CFG);
>> + cfg |= BIT(fn);
>> + cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, cfg);
>> +
>> + /*
>> + * Already linked-up: don't call directly pci_epc_linkup() because we've
>> + * already locked the epc->lock.
>> + */
I'm not sure what you mean by "linked-up" here.
>> + list_for_each_entry(epf, &epc->pci_epf, list)
>> + pci_epf_linkup(epf);
>> +
>> + return 0;
>> +}
>> +
>> +static void cdns_pcie_ep_stop(struct pci_epc *epc)
>> +{
>> + struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
>> + struct cdns_pcie *pcie = &ep->pcie;
>> + u32 cfg;
>> + u8 fn = 0;
>> +
>> + /* Disable this endpoint function (function 0 can't be disabled). */
>
> I do not understand this comment and how it applies to the code,
> in other words fn is always 0 here (so it can't be disabled)
> I do not understand what this code is there for.
>
>> + cfg = cdns_pcie_readl(pcie, CDNS_PCIE_LM_EP_FUNC_CFG);
>> + cfg &= ~BIT(fn);
>> + cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, cfg);
>> +}
>> +
>> +static const struct pci_epc_ops cdns_pcie_epc_ops = {
>> + .write_header = cdns_pcie_ep_write_header,
>> + .set_bar = cdns_pcie_ep_set_bar,
>> + .clear_bar = cdns_pcie_ep_clear_bar,
>> + .map_addr = cdns_pcie_ep_map_addr,
>> + .unmap_addr = cdns_pcie_ep_unmap_addr,
>> + .set_msi = cdns_pcie_ep_set_msi,
>> + .get_msi = cdns_pcie_ep_get_msi,
>> + .raise_irq = cdns_pcie_ep_raise_irq,
>> + .start = cdns_pcie_ep_start,
>> + .stop = cdns_pcie_ep_stop,
>> +};
>> +
>> +static const struct cdns_pcie_ep_data cdns_pcie_ep_data = {
>> + .max_regions = 16,
>> +};
>
> As I mentioned in patch 3, should this be set-up with DT ?
>
> Thanks,
> Lorenzo
>
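If this moves to DT, the driver side could be as simple as (sketch; the
property name "cdns,max-outbound-regions" is just an example, and it assumes
max_regions is stored in struct cdns_pcie_ep instead of the const data):

	u32 max_regions = 16;	/* keep the current value as the default */

	of_property_read_u32(np, "cdns,max-outbound-regions", &max_regions);
	ep->max_regions = max_regions;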
>> +
>> +static const struct of_device_id cdns_pcie_ep_of_match[] = {
>> + { .compatible = "cdns,cdns-pcie-ep",
>> + .data = &cdns_pcie_ep_data },
>> +
>> + { },
>> +};
>> +
>> +static int cdns_pcie_ep_probe(struct platform_device *pdev)
>> +{
>> + struct device *dev = &pdev->dev;
>> + struct device_node *np = dev->of_node;
>> + const struct of_device_id *of_id;
>> + struct cdns_pcie_ep *ep;
>> + struct cdns_pcie *pcie;
>> + struct pci_epc *epc;
>> + struct resource *res;
>> + size_t max_regions;
>> + int ret;
>> +
>> + ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
>> + if (!ep)
>> + return -ENOMEM;
>> +
>> + platform_set_drvdata(pdev, ep);
>> +
>> + pcie = &ep->pcie;
>> + pcie->is_rc = false;
>> +
>> + of_id = of_match_node(cdns_pcie_ep_of_match, np);
>> + ep->data = (const struct cdns_pcie_ep_data *)of_id->data;
>> +
>> + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg");
>> + pcie->reg_base = devm_ioremap_resource(dev, res);
>> + if (IS_ERR(pcie->reg_base)) {
>> + dev_err(dev, "missing \"reg\"\n");
>> + return PTR_ERR(pcie->reg_base);
>> + }
>> +
>> + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem");
>> + if (!res) {
>> + dev_err(dev, "missing \"mem\"\n");
>> + return -EINVAL;
>> + }
>> + pcie->mem_res = res;
>> +
>> + max_regions = ep->data->max_regions;
>> + ep->ob_addr = devm_kzalloc(dev, max_regions * sizeof(*ep->ob_addr),
>> + GFP_KERNEL);
>> + if (!ep->ob_addr)
>> + return -ENOMEM;
>> +
>> + pm_runtime_enable(dev);
>> + ret = pm_runtime_get_sync(dev);
>> + if (ret < 0) {
>> + dev_err(dev, "pm_runtime_get_sync() failed\n");
>> + goto err_get_sync;
>> + }
>> +
>> + /* Disable all but function 0 (anyway BIT(0) is hardwired to 1). */
>> + cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, BIT(0));
Why disable all the functions here?
>> +
>> + epc = devm_pci_epc_create(dev, &cdns_pcie_epc_ops);
>> + if (IS_ERR(epc)) {
>> + dev_err(dev, "failed to create epc device\n");
>> + ret = PTR_ERR(epc);
>> + goto err_init;
>> + }
>> +
>> + ep->epc = epc;
>> + epc_set_drvdata(epc, ep);
>> +
>> + ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
>> + if (ret < 0)
>> + epc->max_functions = 1;
>> +
>> + ret = pci_epc_mem_init(epc, pcie->mem_res->start,
>> + resource_size(pcie->mem_res));
>> + if (ret < 0) {
>> + dev_err(dev, "failed to initialize the memory space\n");
>> + goto err_init;
>> + }
>> +
>> + ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr,
>> + SZ_128K);
Any reason why you chose SZ_128K?
Thanks
Kishon