Message-ID: <20221017083203.3690346-3-yeyunfeng@huawei.com>
Date: Mon, 17 Oct 2022 16:32:00 +0800
From: Yunfeng Ye <yeyunfeng@...wei.com>
To: <catalin.marinas@....com>, <will@...nel.org>,
<wangkefeng.wang@...wei.com>,
<linux-arm-kernel@...ts.infradead.org>,
<linux-kernel@...r.kernel.org>, <yeyunfeng@...wei.com>
CC: <linfeilong@...wei.com>
Subject: [PATCH 2/5] arm64: mm: Extract the processing of asid_generation

To prepare for supporting the ASID isolation feature, extract the
processing of asid_generation into helper functions, so that the
generation handling can be modified in one place.

While at it, it is clearer to call flush_generation() from within
flush_context().

Signed-off-by: Yunfeng Ye <yeyunfeng@...wei.com>
---
 arch/arm64/mm/context.c | 39 ++++++++++++++++++++++++++++++++-------
 1 file changed, 32 insertions(+), 7 deletions(-)
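
Note (not part of the patch): below is a minimal user-space sketch,
purely for illustration, of how the generation check behaves once the
generation lives in the upper bits of the context id. It hard-codes
16-bit ASIDs and uses local names; in the kernel, asid_bits is derived
from the CPU ID registers at boot.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define ASID_BITS		16
	#define ASID_FIRST_VERSION	(1ULL << ASID_BITS)

	/* Generation in the upper bits, hardware ASID in the lower bits. */
	static bool asid_match(uint64_t asid, uint64_t genid)
	{
		return !((asid ^ genid) >> ASID_BITS);
	}

	int main(void)
	{
		uint64_t generation = ASID_FIRST_VERSION;
		uint64_t ctxid = generation | 42;	/* ASID 42, generation 1 */

		printf("%d\n", asid_match(ctxid, generation));	/* 1: current */

		/* A rollover bumps the generation; old context ids go stale. */
		generation += ASID_FIRST_VERSION;
		printf("%d\n", asid_match(ctxid, generation));	/* 0: stale */

		return 0;
	}
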
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 8549b5f30352..380c7b05c36b 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -102,14 +102,40 @@ static void set_reserved_asid_bits(void)
 		bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
 }
 
-#define asid_gen_match(asid) \
-	(!(((asid) ^ atomic64_read(&asid_generation)) >> asid_bits))
+static void asid_generation_init(void)
+{
+	atomic64_set(&asid_generation, ASID_FIRST_VERSION);
+}
+
+static void flush_generation(void)
+{
+	/* We're out of ASIDs, so increment the global generation count */
+	atomic64_add_return_relaxed(ASID_FIRST_VERSION,
+				    &asid_generation);
+}
+
+static inline u64 asid_read_generation(void)
+{
+	return atomic64_read(&asid_generation);
+}
+
+static inline bool asid_match(u64 asid, u64 genid)
+{
+	return !((asid ^ genid) >> asid_bits);
+}
+
+static inline bool asid_gen_match(u64 asid)
+{
+	return asid_match(asid, asid_read_generation());
+}
 
 static void flush_context(void)
 {
 	int i;
 	u64 asid;
 
+	flush_generation();
+
 	/* Update the list of reserved ASIDs and the ASID bitmap. */
 	set_reserved_asid_bits();
 
@@ -163,7 +189,7 @@ static u64 new_context(struct mm_struct *mm)
 {
 	static u32 cur_idx = 1;
 	u64 asid = atomic64_read(&mm->context.id);
-	u64 generation = atomic64_read(&asid_generation);
+	u64 generation = asid_read_generation();
 
 	if (asid != 0) {
 		u64 newasid = asid2ctxid(ctxid2asid(asid), generation);
@@ -202,14 +228,12 @@ static u64 new_context(struct mm_struct *mm)
 	if (asid != NUM_USER_ASIDS)
 		goto set_asid;
 
-	/* We're out of ASIDs, so increment the global generation count */
-	generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION,
-						 &asid_generation);
 	flush_context();
 
 	/* We have more ASIDs than CPUs, so this will always succeed */
 	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
+	generation = asid_read_generation();
 
 set_asid:
 	__set_bit(asid, asid_map);
 	cur_idx = asid;
@@ -405,7 +429,8 @@ arch_initcall(asids_update_limit);
 static int asids_init(void)
 {
 	asid_bits = get_cpu_asid_bits();
-	atomic64_set(&asid_generation, ASID_FIRST_VERSION);
+	asid_generation_init();
+
 	asid_map = bitmap_zalloc(NUM_USER_ASIDS, GFP_KERNEL);
 	if (!asid_map)
 		panic("Failed to allocate bitmap for %lu ASIDs\n",
--
2.27.0