[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <6232948f-033d-8322-e656-544f12c5f784@ozlabs.ru>
Date: Sat, 22 Aug 2020 19:33:47 +1000
From: Alexey Kardashevskiy <aik@...abs.ru>
To: Leonardo Bras <leobras.c@...il.com>,
Michael Ellerman <mpe@...erman.id.au>,
Benjamin Herrenschmidt <benh@...nel.crashing.org>,
Paul Mackerras <paulus@...ba.org>,
Christophe Leroy <christophe.leroy@....fr>,
Joel Stanley <joel@....id.au>,
Thiago Jung Bauermann <bauerman@...ux.ibm.com>,
Ram Pai <linuxram@...ibm.com>,
Brian King <brking@...ux.vnet.ibm.com>,
Murilo Fossa Vicentini <muvic@...ux.ibm.com>,
David Dai <zdai@...ux.vnet.ibm.com>
Cc: linuxppc-dev@...ts.ozlabs.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v1 01/10] powerpc/pseries/iommu: Replace hard-coded page
shift
On 18/08/2020 09:40, Leonardo Bras wrote:
> Some functions assume IOMMU page size can only be 4K (pageshift == 12).
> Update them to accept any page size passed, so we can use 64K pages.
>
> In the process, some defines like TCE_SHIFT were made obsolete, and then
> removed. TCE_RPN_MASK was updated to generate a mask according to
> the pageshift used.
>
> Most places had a tbl struct, so using tbl->it_page_shift was simple.
> tce_free_pSeriesLP() was a special case, since callers do not always have a
> tbl struct, so adding a tceshift parameter seems the right thing to do.
>
> Signed-off-by: Leonardo Bras <leobras.c@...il.com>
> ---
> arch/powerpc/include/asm/tce.h | 10 ++----
> arch/powerpc/platforms/pseries/iommu.c | 42 ++++++++++++++++----------
> 2 files changed, 28 insertions(+), 24 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/tce.h b/arch/powerpc/include/asm/tce.h
> index db5fc2f2262d..971cba2d87cc 100644
> --- a/arch/powerpc/include/asm/tce.h
> +++ b/arch/powerpc/include/asm/tce.h
> @@ -19,15 +19,9 @@
> #define TCE_VB 0
> #define TCE_PCI 1
>
> -/* TCE page size is 4096 bytes (1 << 12) */
> -
> -#define TCE_SHIFT 12
> -#define TCE_PAGE_SIZE (1 << TCE_SHIFT)
> -
> #define TCE_ENTRY_SIZE 8 /* each TCE is 64 bits */
> -
> -#define TCE_RPN_MASK 0xfffffffffful /* 40-bit RPN (4K pages) */
> -#define TCE_RPN_SHIFT 12
> +#define TCE_RPN_BITS 52 /* Bits 0-51 represent RPN on TCE */
Ditch this one and use MAX_PHYSMEM_BITS instead? I am pretty sure this
is the actual limit.
> +#define TCE_RPN_MASK(ps) ((1ul << (TCE_RPN_BITS - (ps))) - 1)
> #define TCE_VALID 0x800 /* TCE valid */
> #define TCE_ALLIO 0x400 /* TCE valid for all lpars */
> #define TCE_PCI_WRITE 0x2 /* write from PCI allowed */
> diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
> index e4198700ed1a..8fe23b7dff3a 100644
> --- a/arch/powerpc/platforms/pseries/iommu.c
> +++ b/arch/powerpc/platforms/pseries/iommu.c
> @@ -107,6 +107,9 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index,
> u64 proto_tce;
> __be64 *tcep;
> u64 rpn;
> + const unsigned long tceshift = tbl->it_page_shift;
> + const unsigned long pagesize = IOMMU_PAGE_SIZE(tbl);
> + const u64 rpn_mask = TCE_RPN_MASK(tceshift);
Using IOMMU_PAGE_SIZE macro for the page size and not using
IOMMU_PAGE_MASK for the mask - this inconsistency makes my small brain
explode :) I understand the history but maaaaan... Oh well, ok.
Good, otherwise. Thanks,
>
> proto_tce = TCE_PCI_READ; // Read allowed
>
> @@ -117,10 +120,10 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index,
>
> while (npages--) {
> /* can't move this out since we might cross MEMBLOCK boundary */
> - rpn = __pa(uaddr) >> TCE_SHIFT;
> - *tcep = cpu_to_be64(proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT);
> + rpn = __pa(uaddr) >> tceshift;
> + *tcep = cpu_to_be64(proto_tce | (rpn & rpn_mask) << tceshift);
>
> - uaddr += TCE_PAGE_SIZE;
> + uaddr += pagesize;
> tcep++;
> }
> return 0;
> @@ -146,7 +149,7 @@ static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
> return be64_to_cpu(*tcep);
> }
>
> -static void tce_free_pSeriesLP(unsigned long liobn, long, long);
> +static void tce_free_pSeriesLP(unsigned long liobn, long, long, long);
> static void tce_freemulti_pSeriesLP(struct iommu_table*, long, long);
>
> static int tce_build_pSeriesLP(unsigned long liobn, long tcenum, long tceshift,
> @@ -159,6 +162,7 @@ static int tce_build_pSeriesLP(unsigned long liobn, long tcenum, long tceshift,
> u64 rpn;
> int ret = 0;
> long tcenum_start = tcenum, npages_start = npages;
> + const u64 rpn_mask = TCE_RPN_MASK(tceshift);
>
> rpn = __pa(uaddr) >> tceshift;
> proto_tce = TCE_PCI_READ;
> @@ -166,12 +170,12 @@ static int tce_build_pSeriesLP(unsigned long liobn, long tcenum, long tceshift,
> proto_tce |= TCE_PCI_WRITE;
>
> while (npages--) {
> - tce = proto_tce | (rpn & TCE_RPN_MASK) << tceshift;
> + tce = proto_tce | (rpn & rpn_mask) << tceshift;
> rc = plpar_tce_put((u64)liobn, (u64)tcenum << tceshift, tce);
>
> if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
> ret = (int)rc;
> - tce_free_pSeriesLP(liobn, tcenum_start,
> + tce_free_pSeriesLP(liobn, tcenum_start, tceshift,
> (npages_start - (npages + 1)));
> break;
> }
> @@ -205,10 +209,12 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
> long tcenum_start = tcenum, npages_start = npages;
> int ret = 0;
> unsigned long flags;
> + const unsigned long tceshift = tbl->it_page_shift;
> + const u64 rpn_mask = TCE_RPN_MASK(tceshift);
>
> if ((npages == 1) || !firmware_has_feature(FW_FEATURE_PUT_TCE_IND)) {
> return tce_build_pSeriesLP(tbl->it_index, tcenum,
> - tbl->it_page_shift, npages, uaddr,
> + tceshift, npages, uaddr,
> direction, attrs);
> }
>
> @@ -225,13 +231,13 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
> if (!tcep) {
> local_irq_restore(flags);
> return tce_build_pSeriesLP(tbl->it_index, tcenum,
> - tbl->it_page_shift,
> + tceshift,
> npages, uaddr, direction, attrs);
> }
> __this_cpu_write(tce_page, tcep);
> }
>
> - rpn = __pa(uaddr) >> TCE_SHIFT;
> + rpn = __pa(uaddr) >> tceshift;
> proto_tce = TCE_PCI_READ;
> if (direction != DMA_TO_DEVICE)
> proto_tce |= TCE_PCI_WRITE;
> @@ -245,12 +251,12 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
> limit = min_t(long, npages, 4096/TCE_ENTRY_SIZE);
>
> for (l = 0; l < limit; l++) {
> - tcep[l] = cpu_to_be64(proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT);
> + tcep[l] = cpu_to_be64(proto_tce | (rpn & rpn_mask) << tceshift);
> rpn++;
> }
>
> rc = plpar_tce_put_indirect((u64)tbl->it_index,
> - (u64)tcenum << 12,
> + (u64)tcenum << tceshift,
> (u64)__pa(tcep),
> limit);
>
> @@ -277,12 +283,13 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
> return ret;
> }
>
> -static void tce_free_pSeriesLP(unsigned long liobn, long tcenum, long npages)
> +static void tce_free_pSeriesLP(unsigned long liobn, long tcenum, long tceshift,
> + long npages)
> {
> u64 rc;
>
> while (npages--) {
> - rc = plpar_tce_put((u64)liobn, (u64)tcenum << 12, 0);
> + rc = plpar_tce_put((u64)liobn, (u64)tcenum << tceshift, 0);
>
> if (rc && printk_ratelimit()) {
> printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
> @@ -301,9 +308,11 @@ static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long n
> u64 rc;
>
> if (!firmware_has_feature(FW_FEATURE_STUFF_TCE))
> - return tce_free_pSeriesLP(tbl->it_index, tcenum, npages);
> + return tce_free_pSeriesLP(tbl->it_index, tcenum,
> + tbl->it_page_shift, npages);
>
> - rc = plpar_tce_stuff((u64)tbl->it_index, (u64)tcenum << 12, 0, npages);
> + rc = plpar_tce_stuff((u64)tbl->it_index,
> + (u64)tcenum << tbl->it_page_shift, 0, npages);
>
> if (rc && printk_ratelimit()) {
> printk("tce_freemulti_pSeriesLP: plpar_tce_stuff failed\n");
> @@ -319,7 +328,8 @@ static unsigned long tce_get_pSeriesLP(struct iommu_table *tbl, long tcenum)
> u64 rc;
> unsigned long tce_ret;
>
> - rc = plpar_tce_get((u64)tbl->it_index, (u64)tcenum << 12, &tce_ret);
> + rc = plpar_tce_get((u64)tbl->it_index,
> + (u64)tcenum << tbl->it_page_shift, &tce_ret);
>
> if (rc && printk_ratelimit()) {
> printk("tce_get_pSeriesLP: plpar_tce_get failed. rc=%lld\n", rc);
>
--
Alexey
Powered by blists - more mailing lists