Message-ID: <862eabc4-b317-334e-acd9-5e83c7eee350@linux.ibm.com>
Date: Tue, 20 Jul 2021 19:44:00 +0200
From: Frederic Barrat <fbarrat@...ux.ibm.com>
To: Leonardo Bras <leobras.c@...il.com>,
Michael Ellerman <mpe@...erman.id.au>,
Benjamin Herrenschmidt <benh@...nel.crashing.org>,
Paul Mackerras <paulus@...ba.org>,
Alexey Kardashevskiy <aik@...abs.ru>,
David Gibson <david@...son.dropbear.id.au>,
kernel test robot <lkp@...el.com>,
Nicolin Chen <nicoleotsuka@...il.com>
Cc: linuxppc-dev@...ts.ozlabs.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v5 05/11] powerpc/pseries/iommu: Allow DDW windows starting at 0x00

On 16/07/2021 10:27, Leonardo Bras wrote:
> enable_ddw() currently returns the address of the DMA window, which is
> considered invalid if it has the value 0x00.
>
> Also, an address returned from find_existing_ddw() is only considered
> valid if it's not 0x00.
>
> Changing this behavior makes sense, given that the users of enable_ddw()
> only need to know whether direct mapping is possible. It also allows a DMA
> window starting at 0x00 to be used.
>
> This will be helpful for using a DDW with indirect mapping, as the window
> address will be different from 0x00, but it will not map the whole
> partition.
>
> Signed-off-by: Leonardo Bras <leobras.c@...il.com>
> Reviewed-by: Alexey Kardashevskiy <aik@...abs.ru>
> ---
Looks good to me
Reviewed-by: Frederic Barrat <fbarrat@...ux.ibm.com>
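
For reference, a condensed before/after view of the caller in
iommu_bypass_supported_pSeriesLP() (taken from the last hunk below, not
meant to compile on its own):

    /* Before: enable_ddw() returned the DMA window address, with 0x00
     * treated as failure, so the caller had to store and test it.
     */
    if (pdn && PCI_DN(pdn)) {
            pdev->dev.archdata.dma_offset = enable_ddw(pdev, pdn);
            if (pdev->dev.archdata.dma_offset)
                    return true;
    }

    /* After: enable_ddw() fills dev->dev.archdata.dma_offset itself and
     * only reports whether direct mapping is possible, so a window
     * starting at 0x00 is no longer mistaken for a failure.
     */
    if (pdn && PCI_DN(pdn))
            return enable_ddw(pdev, pdn);
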
> arch/powerpc/platforms/pseries/iommu.c | 36 +++++++++++++-------------
> 1 file changed, 18 insertions(+), 18 deletions(-)
>
> diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
> index 712d1667144a..b34b473bbdc1 100644
> --- a/arch/powerpc/platforms/pseries/iommu.c
> +++ b/arch/powerpc/platforms/pseries/iommu.c
> @@ -853,25 +853,26 @@ static void remove_ddw(struct device_node *np, bool remove_prop)
> np, ret);
> }
>
> -static u64 find_existing_ddw(struct device_node *pdn, int *window_shift)
> +static bool find_existing_ddw(struct device_node *pdn, u64 *dma_addr, int *window_shift)
> {
> struct direct_window *window;
> const struct dynamic_dma_window_prop *direct64;
> - u64 dma_addr = 0;
> + bool found = false;
>
> spin_lock(&direct_window_list_lock);
> /* check if we already created a window and dupe that config if so */
> list_for_each_entry(window, &direct_window_list, list) {
> if (window->device == pdn) {
> direct64 = window->prop;
> - dma_addr = be64_to_cpu(direct64->dma_base);
> + *dma_addr = be64_to_cpu(direct64->dma_base);
> *window_shift = be32_to_cpu(direct64->window_shift);
> + found = true;
> break;
> }
> }
> spin_unlock(&direct_window_list_lock);
>
> - return dma_addr;
> + return found;
> }
>
> static struct direct_window *ddw_list_new_entry(struct device_node *pdn,
> @@ -1161,20 +1162,20 @@ static int iommu_get_page_shift(u32 query_page_size)
> * pdn: the parent pe node with the ibm,dma_window property
> * Future: also check if we can remap the base window for our base page size
> *
> - * returns the dma offset for use by the direct mapped DMA code.
> + * returns true if it can map all pages (direct mapping), false otherwise.
> */
> -static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
> +static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
> {
> int len = 0, ret;
> int max_ram_len = order_base_2(ddw_memory_hotplug_max());
> struct ddw_query_response query;
> struct ddw_create_response create;
> int page_shift;
> - u64 dma_addr;
> struct device_node *dn;
> u32 ddw_avail[DDW_APPLICABLE_SIZE];
> struct direct_window *window;
> struct property *win64;
> + bool ddw_enabled = false;
> struct dynamic_dma_window_prop *ddwprop;
> struct failed_ddw_pdn *fpdn;
> bool default_win_removed = false;
> @@ -1186,9 +1187,10 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
>
> mutex_lock(&direct_window_init_mutex);
>
> - dma_addr = find_existing_ddw(pdn, &len);
> - if (dma_addr != 0)
> + if (find_existing_ddw(pdn, &dev->dev.archdata.dma_offset, &len)) {
> + ddw_enabled = true;
> goto out_unlock;
> + }
>
> /*
> * If we already went through this for a previous function of
> @@ -1342,7 +1344,8 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
> list_add(&window->list, &direct_window_list);
> spin_unlock(&direct_window_list_lock);
>
> - dma_addr = be64_to_cpu(ddwprop->dma_base);
> + dev->dev.archdata.dma_offset = be64_to_cpu(ddwprop->dma_base);
> + ddw_enabled = true;
> goto out_unlock;
>
> out_free_window:
> @@ -1374,10 +1377,10 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
> * as RAM, then we failed to create a window to cover persistent
> * memory and need to set the DMA limit.
> */
> - if (pmem_present && dma_addr && (len == max_ram_len))
> - dev->dev.bus_dma_limit = dma_addr + (1ULL << len);
> + if (pmem_present && ddw_enabled && (len == max_ram_len))
> + dev->dev.bus_dma_limit = dev->dev.archdata.dma_offset + (1ULL << len);
>
> - return dma_addr;
> + return ddw_enabled;
> }
>
> static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
> @@ -1456,11 +1459,8 @@ static bool iommu_bypass_supported_pSeriesLP(struct pci_dev *pdev, u64 dma_mask)
> break;
> }
>
> - if (pdn && PCI_DN(pdn)) {
> - pdev->dev.archdata.dma_offset = enable_ddw(pdev, pdn);
> - if (pdev->dev.archdata.dma_offset)
> - return true;
> - }
> + if (pdn && PCI_DN(pdn))
> + return enable_ddw(pdev, pdn);
>
> return false;
> }
>