Message-ID: <c596779c-7664-f860-80c0-357e5e074801@ozlabs.ru>
Date:   Mon, 10 May 2021 17:34:07 +1000
From:   Alexey Kardashevskiy <aik@...abs.ru>
To:     Leonardo Bras <leobras.c@...il.com>,
        Michael Ellerman <mpe@...erman.id.au>,
        Benjamin Herrenschmidt <benh@...nel.crashing.org>,
        Paul Mackerras <paulus@...ba.org>,
        Joel Stanley <joel@....id.au>,
        Christophe Leroy <christophe.leroy@....fr>,
        Nicolin Chen <nicoleotsuka@...il.com>,
        Niklas Schnelle <schnelle@...ux.ibm.com>
Cc:     linuxppc-dev@...ts.ozlabs.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v4 02/11] powerpc/kernel/iommu: Add new
 iommu_table_in_use() helper



On 5/1/21 02:31, Leonardo Bras wrote:
> Having a function to check if the iommu table has any allocation helps
> in deciding whether a tbl can be reset for use with a new DMA window.
> 
> It should be enough to replace all instances of !bitmap_empty(tbl...).
> 
> iommu_table_in_use() skips reserved memory, so we don't need to worry about
> releasing it before testing. This makes iommu_table_release_pages()
> unnecessary, since its only use was removing reserved memory for
> testing.
> 
> Also, only store reserved memory values in tbl if they are valid in the
> table, so there is no need to check them in the new helper.
> 
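
Side note for readers skimming the diff: the caller-side simplification
this buys is roughly the following (condensed from the
iommu_take_ownership() hunk further down, not literal patch code):

	/* before: drop the reserved bits, test, restore them on failure */
	iommu_table_release_pages(tbl);
	if (!bitmap_empty(tbl->it_map, tbl->it_size)) {
		ret = -EBUSY;
		/* undo iommu_table_release_pages(), i.e. restore bit#0, etc */
		iommu_table_reserve_pages(tbl, tbl->it_reserved_start,
				tbl->it_reserved_end);
	}

	/* after: one call, the reserved range is skipped internally */
	if (iommu_table_in_use(tbl))
		ret = -EBUSY;
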
> Signed-off-by: Leonardo Bras <leobras.c@...il.com>
> ---
>   arch/powerpc/include/asm/iommu.h |  1 +
>   arch/powerpc/kernel/iommu.c      | 65 ++++++++++++++++----------------
>   2 files changed, 34 insertions(+), 32 deletions(-)
> 
> diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
> index deef7c94d7b6..bf3b84128525 100644
> --- a/arch/powerpc/include/asm/iommu.h
> +++ b/arch/powerpc/include/asm/iommu.h
> @@ -154,6 +154,7 @@ extern int iommu_tce_table_put(struct iommu_table *tbl);
>    */
>   extern struct iommu_table *iommu_init_table(struct iommu_table *tbl,
>   		int nid, unsigned long res_start, unsigned long res_end);
> +bool iommu_table_in_use(struct iommu_table *tbl);
>   
>   #define IOMMU_TABLE_GROUP_MAX_TABLES	2
>   
> diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
> index ad82dda81640..5e168bd91401 100644
> --- a/arch/powerpc/kernel/iommu.c
> +++ b/arch/powerpc/kernel/iommu.c
> @@ -691,32 +691,24 @@ static void iommu_table_reserve_pages(struct iommu_table *tbl,
>   	if (tbl->it_offset == 0)
>   		set_bit(0, tbl->it_map);
>   
> -	tbl->it_reserved_start = res_start;
> -	tbl->it_reserved_end = res_end;
> -
> -	/* Check if res_start..res_end isn't empty and overlaps the table */
> -	if (res_start && res_end &&
> -			(tbl->it_offset + tbl->it_size < res_start ||
> -			 res_end < tbl->it_offset))
> -		return;
> +	if (res_start < tbl->it_offset)
> +		res_start = tbl->it_offset;
>   
> -	for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i)
> -		set_bit(i - tbl->it_offset, tbl->it_map);
> -}
> +	if (res_end > (tbl->it_offset + tbl->it_size))
> +		res_end = tbl->it_offset + tbl->it_size;
>   
> -static void iommu_table_release_pages(struct iommu_table *tbl)
> -{
> -	int i;
> +	/* Check if res_start..res_end is a valid range in the table */
> +	if (res_start >= res_end) {
> +		tbl->it_reserved_start = tbl->it_offset;
> +		tbl->it_reserved_end = tbl->it_offset;
> +		return;
> +	}
>   
> -	/*
> -	 * In case we have reserved the first bit, we should not emit
> -	 * the warning below.
> -	 */
> -	if (tbl->it_offset == 0)
> -		clear_bit(0, tbl->it_map);
> +	tbl->it_reserved_start = res_start;
> +	tbl->it_reserved_end = res_end;
>   
>   	for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i)
> -		clear_bit(i - tbl->it_offset, tbl->it_map);
> +		set_bit(i - tbl->it_offset, tbl->it_map);


git produced a messy chunk here. The new logic is:


static void iommu_table_reserve_pages(struct iommu_table *tbl,
		unsigned long res_start, unsigned long res_end)
{
	int i;

	WARN_ON_ONCE(res_end < res_start);
	/*
	 * Reserve page 0 so it will not be used for any mappings.
	 * This avoids buggy drivers that consider page 0 to be invalid
	 * to crash the machine or even lose data.
	 */
	if (tbl->it_offset == 0)
		set_bit(0, tbl->it_map);

	if (res_start < tbl->it_offset)
		res_start = tbl->it_offset;

	if (res_end > (tbl->it_offset + tbl->it_size))
		res_end = tbl->it_offset + tbl->it_size;

	/* Check if res_start..res_end is a valid range in the table */
	if (res_start >= res_end) {
		tbl->it_reserved_start = tbl->it_offset;
		tbl->it_reserved_end = tbl->it_offset;
		return;
	}


It is just hard to read. A code reviewer would assume res_end >=
res_start (as there is a WARN_ON), but later the code allows res_end to
become less than res_start after the clamping.
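
For example (just a sketch to illustrate, same behaviour, using the
kernel's min()/max()):

	/*
	 * Clamp the reserved range to the table. If it does not overlap
	 * the table at all, clamping makes res_start >= res_end and we
	 * record an empty reserved range.
	 */
	res_start = max(res_start, tbl->it_offset);
	res_end = min(res_end, tbl->it_offset + tbl->it_size);

	if (res_start >= res_end) {
		tbl->it_reserved_start = tbl->it_offset;
		tbl->it_reserved_end = tbl->it_offset;
		return;
	}

i.e. keep the WARN_ON for a caller passing a backwards range, and
document that clamping may legitimately produce an empty one.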

but maybe it is just me :)
Otherwise looks good.

Reviewed-by: Alexey Kardashevskiy <aik@...abs.ru>


>   }
>   
>   /*
> @@ -781,6 +773,22 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid,
>   	return tbl;
>   }
>   
> +bool iommu_table_in_use(struct iommu_table *tbl)
> +{
> +	unsigned long start = 0, end;
> +
> +	/* ignore reserved bit0 */
> +	if (tbl->it_offset == 0)
> +		start = 1;
> +	end = tbl->it_reserved_start - tbl->it_offset;
> +	if (find_next_bit(tbl->it_map, end, start) != end)
> +		return true;
> +
> +	start = tbl->it_reserved_end - tbl->it_offset;
> +	end = tbl->it_size;
> +	return find_next_bit(tbl->it_map, end, start) != end;
> +}
> +
>   static void iommu_table_free(struct kref *kref)
>   {
>   	unsigned long bitmap_sz;
> @@ -799,10 +807,8 @@ static void iommu_table_free(struct kref *kref)
>   
>   	iommu_debugfs_del(tbl);
>   
> -	iommu_table_release_pages(tbl);
> -
>   	/* verify that table contains no entries */
> -	if (!bitmap_empty(tbl->it_map, tbl->it_size))
> +	if (iommu_table_in_use(tbl))
>   		pr_warn("%s: Unexpected TCEs\n", __func__);
>   
>   	/* calculate bitmap size in bytes */
> @@ -1108,18 +1114,13 @@ int iommu_take_ownership(struct iommu_table *tbl)
>   	for (i = 0; i < tbl->nr_pools; i++)
>   		spin_lock(&tbl->pools[i].lock);
>   
> -	iommu_table_release_pages(tbl);
> -
> -	if (!bitmap_empty(tbl->it_map, tbl->it_size)) {
> +	if (iommu_table_in_use(tbl)) {
>   		pr_err("iommu_tce: it_map is not empty");
>   		ret = -EBUSY;
> -		/* Undo iommu_table_release_pages, i.e. restore bit#0, etc */
> -		iommu_table_reserve_pages(tbl, tbl->it_reserved_start,
> -				tbl->it_reserved_end);
> -	} else {
> -		memset(tbl->it_map, 0xff, sz);
>   	}
>   
> +	memset(tbl->it_map, 0xff, sz);
> +
>   	for (i = 0; i < tbl->nr_pools; i++)
>   		spin_unlock(&tbl->pools[i].lock);
>   	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
> 

-- 
Alexey
