lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <99bb1797-06ba-440d-b173-db62d5f54e08@arm.com>
Date: Fri, 19 Dec 2025 12:34:49 +0000
From: Robin Murphy <robin.murphy@....com>
To: Barry Song <21cnbao@...il.com>, catalin.marinas@....com,
 m.szyprowski@...sung.com, will@...nel.org
Cc: ada.coupriediaz@....com, anshuman.khandual@....com, ardb@...nel.org,
 iommu@...ts.linux.dev, linux-arm-kernel@...ts.infradead.org,
 linux-kernel@...r.kernel.org, maz@...nel.org, ryan.roberts@....com,
 surenb@...gle.com, v-songbaohua@...o.com, zhengtangquan@...o.com
Subject: Re: [PATCH 3/6] arm64: Provide dcache_inval_poc_nosync helper

On 2025-12-19 5:36 am, Barry Song wrote:
> From: Barry Song <v-songbaohua@...o.com>
> 
> dcache_inval_poc_nosync does not wait for the data cache invalidation to
> complete. Later, we defer the synchronization so we can wait for all SG
> entries together.
> 
> Cc: Catalin Marinas <catalin.marinas@....com>
> Cc: Will Deacon <will@...nel.org>
> Cc: Marek Szyprowski <m.szyprowski@...sung.com>
> Cc: Robin Murphy <robin.murphy@....com>
> Cc: Ada Couprie Diaz <ada.coupriediaz@....com>
> Cc: Ard Biesheuvel <ardb@...nel.org>
> Cc: Marc Zyngier <maz@...nel.org>
> Cc: Anshuman Khandual <anshuman.khandual@....com>
> Cc: Ryan Roberts <ryan.roberts@....com>
> Cc: Suren Baghdasaryan <surenb@...gle.com>
> Cc: Tangquan Zheng <zhengtangquan@...o.com>
> Signed-off-by: Barry Song <v-songbaohua@...o.com>
> ---
>   arch/arm64/include/asm/cacheflush.h |  1 +
>   arch/arm64/mm/cache.S               | 43 +++++++++++++++++++++--------
>   2 files changed, 33 insertions(+), 11 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
> index 9b6d0a62cf3d..382b4ac3734d 100644
> --- a/arch/arm64/include/asm/cacheflush.h
> +++ b/arch/arm64/include/asm/cacheflush.h
> @@ -74,6 +74,7 @@ extern void icache_inval_pou(unsigned long start, unsigned long end);
>   extern void dcache_clean_inval_poc(unsigned long start, unsigned long end);
>   extern void dcache_inval_poc(unsigned long start, unsigned long end);
>   extern void dcache_clean_poc(unsigned long start, unsigned long end);
> +extern void dcache_inval_poc_nosync(unsigned long start, unsigned long end);
>   extern void dcache_clean_poc_nosync(unsigned long start, unsigned long end);
>   extern void dcache_clean_pop(unsigned long start, unsigned long end);
>   extern void dcache_clean_pou(unsigned long start, unsigned long end);
> diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
> index 4a7c7e03785d..8c1043c9b9e5 100644
> --- a/arch/arm64/mm/cache.S
> +++ b/arch/arm64/mm/cache.S
> @@ -132,17 +132,7 @@ alternative_else_nop_endif
>   	ret
>   SYM_FUNC_END(dcache_clean_pou)
>   
> -/*
> - *	dcache_inval_poc(start, end)
> - *
> - * 	Ensure that any D-cache lines for the interval [start, end)
> - * 	are invalidated. Any partial lines at the ends of the interval are
> - *	also cleaned to PoC to prevent data loss.
> - *
> - *	- start   - kernel start address of region
> - *	- end     - kernel end address of region
> - */
> -SYM_FUNC_START(__pi_dcache_inval_poc)
> +.macro _dcache_inval_poc_impl, do_sync
>   	dcache_line_size x2, x3
>   	sub	x3, x2, #1
>   	tst	x1, x3				// end cache line aligned?
> @@ -158,11 +148,42 @@ SYM_FUNC_START(__pi_dcache_inval_poc)
>   3:	add	x0, x0, x2
>   	cmp	x0, x1
>   	b.lo	2b
> +.if \do_sync
>   	dsb	sy
> +.endif

Similarly, don't bother with complication like this, just put the DSB in 
the one place it needs to be.

Thanks,
Robin.

>   	ret
> +.endm
> +
> +/*
> + *	dcache_inval_poc(start, end)
> + *
> + * 	Ensure that any D-cache lines for the interval [start, end)
> + * 	are invalidated. Any partial lines at the ends of the interval are
> + *	also cleaned to PoC to prevent data loss.
> + *
> + *	- start   - kernel start address of region
> + *	- end     - kernel end address of region
> + */
> +SYM_FUNC_START(__pi_dcache_inval_poc)
> +	_dcache_inval_poc_impl 1
>   SYM_FUNC_END(__pi_dcache_inval_poc)
>   SYM_FUNC_ALIAS(dcache_inval_poc, __pi_dcache_inval_poc)
>   
> +/*
> + *	dcache_inval_poc_nosync(start, end)
> + *
> + * 	Issue D-cache invalidation instructions for the interval
> + * 	[start, end) without waiting for completion. The invalidation
> + *	is not guaranteed complete until a later explicit dsb sy.
> + *
> + *	- start   - kernel start address of region
> + *	- end     - kernel end address of region
> + */
> +SYM_FUNC_START(__pi_dcache_inval_poc_nosync)
> +	_dcache_inval_poc_impl 0
> +SYM_FUNC_END(__pi_dcache_inval_poc_nosync)
> +SYM_FUNC_ALIAS(dcache_inval_poc_nosync, __pi_dcache_inval_poc_nosync)
> +
>   /*
>    *	dcache_clean_poc(start, end)
>    *


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ