Message-ID: <ZhAwdtGPuc2-sd7Q@arm.com>
Date: Fri, 5 Apr 2024 18:10:14 +0100
From: Catalin Marinas <catalin.marinas@....com>
To: Gavin Shan <gshan@...hat.com>
Cc: linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org,
will@...nel.org, akpm@...ux-foundation.org, maz@...nel.org,
oliver.upton@...ux.dev, ryan.roberts@....com, apopple@...dia.com,
rananta@...gle.com, mark.rutland@....com, v-songbaohua@...o.com,
yangyicong@...ilicon.com, shahuang@...hat.com, yihyu@...hat.com,
shan.gavin@...il.com
Subject: Re: [PATCH v3 1/3] arm64: tlb: Fix TLBI RANGE operand

On Fri, Apr 05, 2024 at 01:58:50PM +1000, Gavin Shan wrote:
> diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
> index 3b0e8248e1a4..a75de2665d84 100644
> --- a/arch/arm64/include/asm/tlbflush.h
> +++ b/arch/arm64/include/asm/tlbflush.h
> @@ -161,12 +161,18 @@ static inline unsigned long get_trans_granule(void)
> #define MAX_TLBI_RANGE_PAGES __TLBI_RANGE_PAGES(31, 3)
>
> /*
> - * Generate 'num' values from -1 to 30 with -1 rejected by the
> - * __flush_tlb_range() loop below.
> + * Generate 'num' values from -1 to 31 with -1 rejected by the
> + * __flush_tlb_range() loop below. Its return value is only
> + * significant for a maximum of MAX_TLBI_RANGE_PAGES pages. If
> + * 'pages' is more than that, you must iterate over the overall
> + * range.
> */
> -#define TLBI_RANGE_MASK		GENMASK_ULL(4, 0)
> -#define __TLBI_RANGE_NUM(pages, scale)	\
> -	((((pages) >> (5 * (scale) + 1)) & TLBI_RANGE_MASK) - 1)
> +#define __TLBI_RANGE_NUM(pages, scale)					\
> +	({								\
> +		int __pages = min((pages),				\
> +				  __TLBI_RANGE_PAGES(31, (scale)));	\
> +		(__pages >> (5 * (scale) + 1)) - 1;			\
> +	})

Reviewed-by: Catalin Marinas <catalin.marinas@....com>

This looks correct to me as well. I spent a bit of time updating an old
CBMC model I had around. With the original __TLBI_RANGE_NUM, it indeed
shows 'scale' becoming negative on the kvm_tlb_flush_vmid_range() path.
The patch above fixes that, and it also allows the non-KVM path to use
the range TLBI for the full MAX_TLBI_RANGE_PAGES (as per patch 3).
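
To make the arithmetic concrete, here is a quick standalone userspace
sketch (not part of the model below; it needs the GCC/Clang statement
expression extension and the _OLD/_NEW names are only for the
comparison) evaluating both macros at pages == MAX_TLBI_RANGE_PAGES:

#include <stdio.h>

#define __TLBI_RANGE_PAGES(num, scale)	((unsigned long)((num) + 1) << (5 * (scale) + 1))
#define MAX_TLBI_RANGE_PAGES		__TLBI_RANGE_PAGES(31, 3)
#define min(x, y)			((x) <= (y) ? (x) : (y))

/* pre-patch */
#define TLBI_RANGE_MASK			0x1fUL
#define __TLBI_RANGE_NUM_OLD(pages, scale) \
	((((int)(pages) >> (5 * (scale) + 1)) & TLBI_RANGE_MASK) - 1)

/* post-patch */
#define __TLBI_RANGE_NUM_NEW(pages, scale) \
	({ \
		int __pages = min((pages), __TLBI_RANGE_PAGES(31, (scale))); \
		(__pages >> (5 * (scale) + 1)) - 1; \
	})

int main(void)
{
	unsigned long pages = MAX_TLBI_RANGE_PAGES;	/* 0x200000 */
	int scale;

	for (scale = 3; scale >= 0; scale--) {
		int old_num = __TLBI_RANGE_NUM_OLD(pages, scale);
		int new_num = __TLBI_RANGE_NUM_NEW(pages, scale);

		/*
		 * Old: -1 at every scale, so the __flush_tlb_range_op()
		 * loop makes no progress and 'scale' goes negative.
		 * New: 31 at scale 3, i.e. the whole MAX_TLBI_RANGE_PAGES
		 * range in a single range TLBI.
		 */
		printf("scale %d: old num %d, new num %d\n",
		       scale, old_num, new_num);
	}
	return 0;
}
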
FWIW, here's the model:
-----------------------8<--------------------------------------
// SPDX-License-Identifier: GPL-2.0-only
/*
* Check with:
* cbmc --unwind 6 tlbinval.c
*/
#define PAGE_SHIFT (12)
#define PAGE_SIZE (1 << PAGE_SHIFT)
#define VA_RANGE (1UL << 48)
#define SZ_64K 0x00010000
#define __round_mask(x, y) ((__typeof__(x))((y)-1))
#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
#define round_down(x, y) ((x) & ~__round_mask(x, y))
#define min(x, y) (x <= y ? x : y)
#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (__typeof__(x))(a) - 1)
#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
/* only masking out irrelevant bits */
#define __TLBI_RANGE_VADDR(addr, shift) ((addr) & ~((1UL << shift) - 1))
#define __TLBI_VADDR(addr) __TLBI_RANGE_VADDR(addr, PAGE_SHIFT)
#define __TLBI_RANGE_PAGES(num, scale) ((unsigned long)((num) + 1) << (5 * (scale) + 1))
#define MAX_TLBI_RANGE_PAGES __TLBI_RANGE_PAGES(31, 3)
#if 0
/* original code */
#define TLBI_RANGE_MASK		0x1fUL
#define __TLBI_RANGE_NUM(pages, scale)	\
	((((int)(pages) >> (5 * (scale) + 1)) & TLBI_RANGE_MASK) - 1)
#else
#define __TLBI_RANGE_NUM(pages, scale)					\
	({								\
		int __pages = min((pages),				\
				  __TLBI_RANGE_PAGES(31, (scale)));	\
		(__pages >> (5 * (scale) + 1)) - 1;			\
	})
#endif

const static _Bool lpa2 = 1;
const static _Bool kvm = 1;

static unsigned long inval_start;
static unsigned long inval_end;
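
/*
 * Record the union of everything invalidated so far and check that each
 * new TLBI neither overlaps what was already invalidated nor leaves a
 * gap.
 */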
static void tlbi(unsigned long start, unsigned long size)
{
	unsigned long end = start + size;

	if (inval_end == 0) {
		inval_start = start;
		inval_end = end;
		return;
	}

	/* optimal invalidation */
	__CPROVER_assert(start >= inval_end || end <= inval_start, "No overlapping TLBI range");

	if (start < inval_start) {
		__CPROVER_assert(end >= inval_start, "No TLBI range gaps");
		inval_start = start;
	}
	if (end > inval_end) {
		__CPROVER_assert(start <= inval_end, "No TLBI range gaps");
		inval_end = end;
	}
}

static void tlbi_range(unsigned long start, int num, int scale)
{
	unsigned long size = __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT;

	tlbi(start, size);
}
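
/*
 * Model of the arm64 __flush_tlb_range_op() loop: a per-stride TLBI when
 * a single page is left or, with LPA2, when 'start' is not 64K aligned;
 * otherwise a range TLBI, with 'scale' going from 3 down to 0.
 */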
static void __flush_tlb_range_op(unsigned long start, unsigned long pages,
				 unsigned long stride)
{
	int num = 0;
	int scale = 3;
	int shift = lpa2 ? 16 : PAGE_SHIFT;
	unsigned long addr;

	while (pages > 0) {
		if (pages == 1 ||
		    (lpa2 && start != ALIGN(start, SZ_64K))) {
			addr = __TLBI_VADDR(start);
			tlbi(addr, stride);
			start += stride;
			pages -= stride >> PAGE_SHIFT;
			continue;
		}

		__CPROVER_assert(scale >= 0 && scale <= 3, "Scale in range");
		num = __TLBI_RANGE_NUM(pages, scale);
		__CPROVER_assert(num <= 31, "Num in range");

		if (num >= 0) {
			addr = __TLBI_RANGE_VADDR(start, shift);
			tlbi_range(addr, num, scale);
			start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT;
			pages -= __TLBI_RANGE_PAGES(num, scale);
		}
		scale--;
	}
}
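
/*
 * Non-KVM path: anything larger than MAX_TLBI_RANGE_PAGES falls back to
 * a full invalidation, modelled as tlbi(0, VA_RANGE).
 */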
static void __flush_tlb_range(unsigned long start, unsigned long pages,
			      unsigned long stride)
{
	if (pages > MAX_TLBI_RANGE_PAGES) {
		tlbi(0, VA_RANGE);
		return;
	}

	__flush_tlb_range_op(start, pages, stride);
}
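
/* __kvm_tlb_flush_vmid_range() only ever uses a page-granule stride */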
void __kvm_tlb_flush_vmid_range(unsigned long start, unsigned long pages)
{
	unsigned long stride;

	stride = PAGE_SIZE;
	start = round_down(start, stride);

	__flush_tlb_range_op(start, pages, stride);
}
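
/* KVM caller: split the range into chunks of at most MAX_TLBI_RANGE_PAGES pages */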
static void kvm_tlb_flush_vmid_range(unsigned long addr, unsigned long size)
{
	unsigned long pages, inval_pages;

	pages = size >> PAGE_SHIFT;
	while (pages > 0) {
		inval_pages = min(pages, MAX_TLBI_RANGE_PAGES);
		__kvm_tlb_flush_vmid_range(addr, inval_pages);

		addr += inval_pages << PAGE_SHIFT;
		pages -= inval_pages;
	}
}

static unsigned long nondet_ulong(void);
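
/*
 * Nondeterministic start/end/stride (page, PMD or PUD size with 4K
 * pages); at the end, either exactly [start, end) or the whole VA range
 * must have been invalidated.
 */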
int main(void)
{
	unsigned long stride = nondet_ulong();
	unsigned long start = round_down(nondet_ulong(), stride);
	unsigned long end = round_up(nondet_ulong(), stride);
	unsigned long pages = (end - start) >> PAGE_SHIFT;

	__CPROVER_assume(stride == PAGE_SIZE ||
			 stride == PAGE_SIZE << (PAGE_SHIFT - 3) ||
			 stride == PAGE_SIZE << (2 * (PAGE_SHIFT - 3)));
	__CPROVER_assume(start < end);
	__CPROVER_assume(end <= VA_RANGE);

	if (kvm)
		kvm_tlb_flush_vmid_range(start, pages << PAGE_SHIFT);
	else
		__flush_tlb_range(start, pages, stride);

	__CPROVER_assert((inval_start == 0 && inval_end == VA_RANGE) ||
			 (inval_start == start && inval_end == end),
			 "Correct invalidation");

	return 0;
}