[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date: Sun, 10 Oct 2010 18:22:11 +0300
From: Felipe Contreras <felipe.contreras@...il.com>
To: Fernando Guzman Lugo <x0095840@...com>
Cc: Hiroshi.DOYU@...ia.com, david.cohen@...ia.com,
felipe.contreras@...ia.com, ameya.palande@...ia.com,
linux-kernel@...r.kernel.org, andy.shevchenko@...il.com,
linux-omap@...r.kernel.org
Subject: Re: [PATCHv2 2/3] iovmm: add superpages support to fixed da address
On Tue, Oct 5, 2010 at 12:02 AM, Fernando Guzman Lugo <x0095840@...com> wrote:
> This patch adds superpages support to fixed da address
> inside iommu_kmap function.
>
> Signed-off-by: Fernando Guzman Lugo <x0095840@...com>
> ---
> arch/arm/plat-omap/iovmm.c | 61 ++++++++++++++++++++++++++-----------------
> 1 files changed, 37 insertions(+), 24 deletions(-)
>
> diff --git a/arch/arm/plat-omap/iovmm.c b/arch/arm/plat-omap/iovmm.c
> index 34f0012..8006a19 100644
> --- a/arch/arm/plat-omap/iovmm.c
> +++ b/arch/arm/plat-omap/iovmm.c
> @@ -87,27 +87,37 @@ static size_t sgtable_len(const struct sg_table *sgt)
> }
> #define sgtable_ok(x) (!!sgtable_len(x))
>
> +
> +static unsigned max_alignment(u32 addr)
> +{
> + int i;
> + unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };
> + for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++)
> + ;
> + return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0;
> +}
> +
> +
I don't think those extra blank lines make sense.
> /*
> * calculate the optimal number sg elements from total bytes based on
> * iommu superpages
> */
> -static unsigned int sgtable_nents(size_t bytes)
> +static unsigned int sgtable_nents(size_t bytes, u32 da, u32 pa)
> {
> - int i;
> - unsigned int nr_entries;
> - const unsigned long pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };
> + unsigned int nr_entries = 0, ent_sz;
How about s/unsigned int/unsigned/?
>
> if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
> pr_err("%s: wrong size %08x\n", __func__, bytes);
> return 0;
> }
>
> - nr_entries = 0;
> - for (i = 0; i < ARRAY_SIZE(pagesize); i++) {
> - if (bytes >= pagesize[i]) {
> - nr_entries += (bytes / pagesize[i]);
> - bytes %= pagesize[i];
> - }
> + while (bytes) {
> + ent_sz = max_alignment(da | pa);
> + ent_sz = min(ent_sz, (unsigned)iopgsz_max(bytes));
> + nr_entries++;
> + da += ent_sz;
> + pa += ent_sz;
> + bytes -= ent_sz;
> }
> BUG_ON(bytes);
>
> @@ -115,7 +125,8 @@ static unsigned int sgtable_nents(size_t bytes)
> }
>
> /* allocate and initialize sg_table header(a kind of 'superblock') */
> -static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags)
> +static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags,
> + u32 da, u32 pa)
> {
> unsigned int nr_entries;
> int err;
> @@ -127,9 +138,8 @@ static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags)
> if (!IS_ALIGNED(bytes, PAGE_SIZE))
> return ERR_PTR(-EINVAL);
>
> - /* FIXME: IOVMF_DA_FIXED should support 'superpages' */
> - if ((flags & IOVMF_LINEAR) && (flags & IOVMF_DA_ANON)) {
> - nr_entries = sgtable_nents(bytes);
> + if (flags & IOVMF_LINEAR) {
> + nr_entries = sgtable_nents(bytes, da, pa);
> if (!nr_entries)
> return ERR_PTR(-EINVAL);
> } else
> @@ -409,7 +419,8 @@ static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
> BUG_ON(!sgt);
> }
>
> -static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, size_t len)
> +static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, u32 da,
> + size_t len)
> {
> unsigned int i;
> struct scatterlist *sg;
> @@ -420,7 +431,8 @@ static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, size_t len)
> for_each_sg(sgt->sgl, sg, sgt->nents, i) {
> size_t bytes;
>
> - bytes = iopgsz_max(len);
> + bytes = max_alignment(da | pa);
> + bytes = min(bytes, (size_t)iopgsz_max(len));
Why the size_t casting?
Otherwise:
Signed-off-by: Felipe Contreras <felipe.contreras@...il.com>
--
Felipe Contreras
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/
Powered by blists - more mailing lists