Message-ID: <20221017083203.3690346-4-yeyunfeng@huawei.com>
Date: Mon, 17 Oct 2022 16:32:01 +0800
From: Yunfeng Ye <yeyunfeng@...wei.com>
To: <catalin.marinas@....com>, <will@...nel.org>,
<wangkefeng.wang@...wei.com>,
<linux-arm-kernel@...ts.infradead.org>,
<linux-kernel@...r.kernel.org>, <yeyunfeng@...wei.com>
CC: <linfeilong@...wei.com>
Subject: [PATCH 3/5] arm64: mm: Use cpumask in flush_context()

Currently, flush_context() selects every CPU for TLB flushing.

To prepare for flushing the TLBs of only a subset of CPUs, introduce
asid_housekeeping_mask, iterate over that mask instead of all possible
CPUs, and accumulate the pending flushes with cpumask_or() instead of
cpumask_setall(). The mask is initialised to cpu_possible_mask for now,
so this patch on its own introduces no functional change.

Signed-off-by: Yunfeng Ye <yeyunfeng@...wei.com>
---
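A quick stand-alone illustration of the cpumask_setall() vs cpumask_or()
difference, for review. This is userspace C modelling an 8-CPU mask with
an unsigned char; it is a sketch, not kernel code, and the mask values
are made up:

#include <stdio.h>

int main(void)
{
	unsigned char tlb_flush_pending = 0x01; /* CPU0 already pending */
	unsigned char flush_mask = 0x0e;        /* flushing CPUs 1-3 */

	/* Old behaviour: cpumask_setall() marks every CPU. */
	unsigned char setall = 0xff;

	/*
	 * New behaviour: cpumask_or() adds only the CPUs in the flush
	 * mask and keeps bits already pending for other CPUs.
	 */
	unsigned char ored = tlb_flush_pending | flush_mask;

	printf("setall: %#x  or: %#x\n", setall, ored); /* 0xff vs 0xf */
	return 0;
}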
arch/arm64/mm/context.c | 14 ++++++++++++--
1 file changed, 12 insertions(+), 2 deletions(-)
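
With asid_housekeeping_mask initialised to cpu_possible_mask, the new
for_each_cpu() loop walks the same CPUs as the old
for_each_possible_cpu() loop did. A minimal userspace sketch of that
equivalence (again an 8-bit model with made-up values, not kernel code):

#include <stdio.h>

#define NR_CPUS 8

int main(void)
{
	unsigned char possible = 0xff;         /* every CPU is possible */
	unsigned char housekeeping = possible; /* as set in asids_init() */
	int cpu;

	/* Models for_each_cpu(cpu, asid_housekeeping_mask). */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (housekeeping & (1u << cpu))
			printf("flush active_asids on CPU %d\n", cpu);
	return 0;
}
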
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 380c7b05c36b..e402997aa1c2 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -11,6 +11,7 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
+#include <linux/cpumask.h>
 
 #include <asm/cpufeature.h>
 #include <asm/mmu_context.h>
@@ -32,6 +33,7 @@ static unsigned long *asid_map;
 static DEFINE_PER_CPU(atomic64_t, active_asids);
 static DEFINE_PER_CPU(u64, reserved_asids);
 static cpumask_t tlb_flush_pending;
+static const struct cpumask *asid_housekeeping_mask;
 
 static struct asid_bitmap pinned_asid;
 
@@ -129,17 +131,23 @@ static inline bool asid_gen_match(u64 asid)
 	return asid_match(asid, asid_read_generation());
 }
 
+static const struct cpumask *flush_cpumask(void)
+{
+	return asid_housekeeping_mask;
+}
+
 static void flush_context(void)
 {
 	int i;
 	u64 asid;
+	const struct cpumask *cpumask = flush_cpumask();
 
 	flush_generation();
 
 	/* Update the list of reserved ASIDs and the ASID bitmap. */
 	set_reserved_asid_bits();
 
-	for_each_possible_cpu(i) {
+	for_each_cpu(i, cpumask) {
 		asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
 		/*
 		 * If this CPU has already been through a
@@ -158,7 +166,7 @@ static void flush_context(void)
 	 * Queue a TLB invalidation for each CPU to perform on next
 	 * context-switch
 	 */
-	cpumask_setall(&tlb_flush_pending);
+	cpumask_or(&tlb_flush_pending, &tlb_flush_pending, cpumask);
 }
 
 static bool check_update_reserved_asid(u64 asid, u64 newasid)
@@ -439,6 +447,8 @@ static int asids_init(void)
 	pinned_asid.map = bitmap_zalloc(NUM_USER_ASIDS, GFP_KERNEL);
 	pinned_asid.nr = 0;
 
+	asid_housekeeping_mask = cpu_possible_mask;
+
 	/*
 	 * We cannot call set_reserved_asid_bits() here because CPU
 	 * caps are not finalized yet, so it is safer to assume KPTI
--
2.27.0