Message-ID: <20190327100201.32220-1-anup.patel@wdc.com>
Date:   Wed, 27 Mar 2019 10:02:24 +0000
From:   Anup Patel <Anup.Patel@....com>
To:     Palmer Dabbelt <palmer@...ive.com>,
        Albert Ou <aou@...s.berkeley.edu>
CC:     Gary Guo <gary@...yguo.net>, Atish Patra <Atish.Patra@....com>,
        Christoph Hellwig <hch@...radead.org>,
        Paul Walmsley <paul.walmsley@...ive.com>,
        Mike Rapoport <rppt@...ux.ibm.com>,
        "linux-riscv@...ts.infradead.org" <linux-riscv@...ts.infradead.org>,
        "linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
        Anup Patel <Anup.Patel@....com>
Subject: [PATCH] RISC-V: Implement ASID allocator

Currently, we do a local TLB flush on every MM switch. This hurts
performance badly because every MM switch forces fresh page table walks.

This patch implements an ASID allocator for assigning an ASID to every MM
context. The number of ASIDs is limited in HW, so we create a logical
entity named CONTEXTID to assign to each MM context. The lower bits of a
CONTEXTID are the ASID and the upper bits are a VERSION number. We
allocate a new CONTEXTID on the first MM switch of an MM context, where
the ASID is allocated from an ASID bitmap and the VERSION is provided by
an atomic counter.
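
For illustration, splitting a CONTEXTID back into its two halves looks
roughly like this (cntx2asid()/cntx2version() are illustrative helper
names, not part of this patch, which open-codes the same mask and shift):

  static inline u64 cntx2asid(u64 cntx)
  {
  	return cntx & asid_mask;	/* low asid_bits bits: the HW ASID */
  }

  static inline u64 cntx2version(u64 cntx)
  {
  	return cntx >> asid_bits;	/* upper bits: the VERSION number */
  }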

At the time of allocating a new CONTEXTID, if we run out of ASIDs, we
(condensed in the sketch after this list):
1. Flush the ASID bitmap
2. Increment the VERSION atomic counter
3. Force a local TLB flush on all CPUs
4. Re-allocate an ASID from the ASID bitmap
5. Force CONTEXTID re-assignment on all CPUs
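
In condensed form (this mirrors the __new_context() slow path added
below; locking and the fast path are omitted):

  asid = find_next_zero_bit(context_asid_map, num_asids, cur_idx);
  if (asid == num_asids) {	/* out of free ASIDs */
  	/* step 2: bump the VERSION counter */
  	ver = atomic64_add_return_relaxed(first_version, &context_version);
  	/* steps 1, 3 and 5: clear the bitmap, queue local TLB flushes,
  	 * and force CONTEXTID re-assignment on all CPUs */
  	__flush_context();
  	/* step 4: re-allocate an ASID from the now-empty bitmap */
  	asid = find_next_zero_bit(context_asid_map, num_asids, 1);
  }
  __set_bit(asid, context_asid_map);
  return asid | ver;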

Using the above approach, we have virtually infinite CONTEXTIDs on top of
a limited number of HW ASIDs. This approach is inspired by the ASID
allocator used for Linux ARM/ARM64, but we have simplified it as much as
possible.

Overall, this ASID allocator helps us reduce the rate of local TLB flushes
on every CPU, thereby increasing performance. The number of available
ASIDs is detected at boot-time by writing 1s to the ASID bits in the SATP
CSR and reading them back. ASID #0 is always reserved because it is used
at boot-time for the initial MM context.
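
The detection itself is a short probe (condensed from asids_init() below):
write all-ones into the SATP ASID field, read back which bits stuck, and
restore the old value:

  old = csr_read(sptbr);
  csr_write(sptbr, old | (SATP_ASID_MASK << SATP_ASID_SHIFT));
  asid_bits = fls_long((csr_read(sptbr) >> SATP_ASID_SHIFT) & SATP_ASID_MASK);
  csr_write(sptbr, old);	/* restore the original SATP value */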

This patch was tested on the QEMU/virt machine and the SiFive Unleashed
board. On the QEMU/virt machine, we see an approximately 10% performance
improvement with the SW-emulated TLBs and ASIDs provided by QEMU.
Unfortunately, the ASID bits of the SATP CSR are not implemented on the
SiFive Unleashed board, so we see no change in performance there.

Signed-off-by: Anup Patel <anup.patel@....com>
---
This patch is based on Linux-5.1-rc2 and the TLB flush cleanup patches v4
from Gary Guo. It can also be found in the riscv_asid_allocator_v1 branch
of https://github.com/avpatel/linux.git
---
 arch/riscv/include/asm/csr.h         |   6 +
 arch/riscv/include/asm/mmu.h         |   1 +
 arch/riscv/include/asm/mmu_context.h |   1 +
 arch/riscv/mm/context.c              | 204 +++++++++++++++++++++++++--
 4 files changed, 200 insertions(+), 12 deletions(-)

diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h
index 28a0d1cb374c..ce18ab8f53ed 100644
--- a/arch/riscv/include/asm/csr.h
+++ b/arch/riscv/include/asm/csr.h
@@ -45,10 +45,16 @@
 #define SATP_PPN     _AC(0x003FFFFF, UL)
 #define SATP_MODE_32 _AC(0x80000000, UL)
 #define SATP_MODE    SATP_MODE_32
+#define SATP_ASID_BITS	9
+#define SATP_ASID_SHIFT	22
+#define SATP_ASID_MASK	_AC(0x1FF, UL)
 #else
 #define SATP_PPN     _AC(0x00000FFFFFFFFFFF, UL)
 #define SATP_MODE_39 _AC(0x8000000000000000, UL)
 #define SATP_MODE    SATP_MODE_39
+#define SATP_ASID_BITS	16
+#define SATP_ASID_SHIFT	44
+#define SATP_ASID_MASK	_AC(0xFFFF, UL)
 #endif

 /* Interrupt Enable and Interrupt Pending flags */
diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h
index 5df2dccdba12..dcbbefb89ebc 100644
--- a/arch/riscv/include/asm/mmu.h
+++ b/arch/riscv/include/asm/mmu.h
@@ -18,6 +18,7 @@
 #ifndef __ASSEMBLY__

 typedef struct {
+	atomic64_t id;
 	void *vdso;
 #ifdef CONFIG_SMP
 	/* A local icache flush is needed before user execution can resume. */
diff --git a/arch/riscv/include/asm/mmu_context.h b/arch/riscv/include/asm/mmu_context.h
index bf4f097a9051..785dd65aa904 100644
--- a/arch/riscv/include/asm/mmu_context.h
+++ b/arch/riscv/include/asm/mmu_context.h
@@ -30,6 +30,7 @@ static inline void enter_lazy_tlb(struct mm_struct *mm,
 static inline int init_new_context(struct task_struct *task,
 	struct mm_struct *mm)
 {
+	atomic64_set(&(mm)->context.id, 0);
 	return 0;
 }

diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
index 0f787bcd3a7a..aa43f6aa727e 100644
--- a/arch/riscv/mm/context.c
+++ b/arch/riscv/mm/context.c
@@ -2,13 +2,158 @@
 /*
  * Copyright (C) 2012 Regents of the University of California
  * Copyright (C) 2017 SiFive
+ * Copyright (C) 2019 Western Digital Corporation or its affiliates.
  */

+#include <linux/bitops.h>
 #include <linux/mm.h>
+#include <linux/slab.h>

 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>

+static bool use_asid_allocator;
+static unsigned long asid_bits;
+static unsigned long num_asids;
+static unsigned long asid_mask;
+static u64 first_version;
+
+static DEFINE_RAW_SPINLOCK(context_lock);
+static atomic64_t context_version;
+static unsigned long *context_asid_map;
+static cpumask_t context_tlb_flush_pending;
+
+static DEFINE_PER_CPU(atomic64_t, active_context);
+
+/* Note: must be called with context_lock held */
+static void __flush_context(void)
+{
+	int i;
+	u64 cntx, cntx_asid, cntx_ver;
+
+	/* Update the list of reserved ASIDs and the ASID bitmap. */
+	bitmap_clear(context_asid_map, 0, num_asids);
+
+	/* Mark already active ASIDs as used */
+	for_each_possible_cpu(i) {
+		cntx = atomic64_xchg_relaxed(&per_cpu(active_context, i), 0);
+
+		cntx_asid = cntx & asid_mask;
+		cntx_ver = cntx >> asid_bits;
+
+		if (cntx_ver)
+			__set_bit(cntx_asid, context_asid_map);
+	}
+
+	/* Mark ASID #0 as used because it is used at boot-time */
+	__set_bit(0, context_asid_map);
+
+	/* Queue a TLB invalidation for each CPU on next context-switch */
+	cpumask_setall(&context_tlb_flush_pending);
+}
+
+/* Note: must be called with context_lock held */
+static u64 __new_context(struct mm_struct *mm)
+{
+	static u32 cur_idx = 1;
+	u64 asid, ver = atomic64_read(&context_version);
+
+	/*
+	 * Allocate a free ASID. If we can't find one then increment
+	 * context_version and flush all ASIDs.
+	 */
+	asid = find_next_zero_bit(context_asid_map, num_asids, cur_idx);
+	if (asid != num_asids)
+		goto set_asid;
+
+	/* We're out of ASIDs, so increment the global version count */
+	ver = atomic64_add_return_relaxed(first_version,
+					  &context_version);
+
+	__flush_context();
+
+	/* We have more ASIDs than CPUs, so this will always succeed */
+	asid = find_next_zero_bit(context_asid_map, num_asids, 1);
+
+set_asid:
+	__set_bit(asid, context_asid_map);
+	cur_idx = asid;
+	return asid | ver;
+}
+
+static void set_mm_asid(struct mm_struct *mm, unsigned int cpu)
+{
+	unsigned long flags;
+	u64 cntx, old_active_cntx;
+
+	cntx = atomic64_read(&mm->context.id);
+
+	/*
+	 * If our active_context is non-zero and the context matches the
+	 * current version, then we update the active_context entry with a
+	 * relaxed cmpxchg.
+	 *
+	 * Following is how we handle racing with a concurrent rollover:
+	 *
+	 * - We get a zero back from the cmpxchg and end up waiting on the
+	 *   lock. Taking the lock synchronises with the rollover and so
+	 *   we are forced to see the updated version.
+	 *
+	 * - We get a valid context back from the cmpxchg then we continue
+	 *   using old ASID because __flush_context() would have marked ASID
+	 *   of active_context as used and next context switch we will allocate
+	 *   new context.
+	 */
+	old_active_cntx = atomic64_read(&per_cpu(active_context, cpu));
+	if (old_active_cntx &&
+	    !((cntx ^ atomic64_read(&context_version)) >> asid_bits) &&
+	    atomic64_cmpxchg_relaxed(&per_cpu(active_context, cpu),
+				     old_active_cntx, cntx))
+		goto switch_mm_fast;
+
+	raw_spin_lock_irqsave(&context_lock, flags);
+
+	/* Check that our ASID belongs to the current version. */
+	cntx = atomic64_read(&mm->context.id);
+	if ((cntx ^ atomic64_read(&context_version)) >> asid_bits) {
+		cntx = __new_context(mm);
+		atomic64_set(&mm->context.id, cntx);
+	}
+
+	if (cpumask_test_and_clear_cpu(cpu, &context_tlb_flush_pending))
+		local_flush_tlb_all();
+
+	atomic64_set(&per_cpu(active_context, cpu), cntx);
+
+	raw_spin_unlock_irqrestore(&context_lock, flags);
+
+switch_mm_fast:
+	/*
+	 * Use the old sptbr name instead of using the current satp
+	 * name to support binutils 2.29 which doesn't know about the
+	 * privileged ISA 1.10 yet.
+	 */
+	csr_write(sptbr, virt_to_pfn(mm->pgd) |
+		  ((cntx & asid_mask) << SATP_ASID_SHIFT) | SATP_MODE);
+}
+
+static void set_mm_noasid(struct mm_struct *mm)
+{
+	/*
+	 * Use the old sptbr name instead of using the current satp
+	 * name to support binutils 2.29 which doesn't know about the
+	 * privileged ISA 1.10 yet.
+	 */
+	csr_write(sptbr, virt_to_pfn(mm->pgd) | SATP_MODE);
+
+	/*
+	 * sfence.vma after SATP write. We call it on MM context instead of
+	 * calling local_flush_tlb_all to prevent global mappings from being
+	 * affected.
+	 */
+	local_flush_tlb_mm(mm);
+}
+
 /*
  * When necessary, performs a deferred icache flush for the given MM context,
  * on the local CPU.  RISC-V has no direct mechanism for instruction cache
@@ -58,20 +203,55 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	cpumask_clear_cpu(cpu, mm_cpumask(prev));
 	cpumask_set_cpu(cpu, mm_cpumask(next));

-	/*
-	 * Use the old spbtr name instead of using the current satp
-	 * name to support binutils 2.29 which doesn't know about the
-	 * privileged ISA 1.10 yet.
-	 */
-	csr_write(sptbr, virt_to_pfn(next->pgd) | SATP_MODE);
+	if (use_asid_allocator)
+		set_mm_asid(next, cpu);
+	else
+		set_mm_noasid(next);
+
+	flush_icache_deferred(next);
+}
+
+static int asids_init(void)
+{
+	unsigned long old, new;
+
+	/* Figure-out number of ASID bits in HW */
+	old = csr_read(sptbr);
+	new = old | (SATP_ASID_MASK << SATP_ASID_SHIFT);
+	csr_write(sptbr, new);
+	new = (csr_read(sptbr) >> SATP_ASID_SHIFT) & SATP_ASID_MASK;
+	asid_bits = fls_long(new);
+	csr_write(sptbr, old);
+
+	/* Pre-compute ASID details */
+	num_asids = 1UL << asid_bits;
+	asid_mask = num_asids - 1;
+	first_version = num_asids;

 	/*
-	 * sfence.vma after SATP write. We call it on MM context instead of
-	 * calling local_flush_tlb_all to prevent global mappings from being
-	 * affected.
+	 * Use the ASID allocator only if the number of HW ASIDs is
+	 * at least twice the number of CPUs
 	 */
-	local_flush_tlb_mm(next);
+	use_asid_allocator =
+		(num_asids > (2 * num_possible_cpus()));

-	flush_icache_deferred(next);
-}
+	/* Setup ASID allocator if available */
+	if (use_asid_allocator) {
+		atomic64_set(&context_version, first_version);
+
+		context_asid_map = kcalloc(BITS_TO_LONGS(num_asids),
+				   sizeof(*context_asid_map), GFP_KERNEL);
+		if (!context_asid_map)
+			panic("Failed to allocate bitmap for %lu ASIDs\n",
+			      num_asids);

+		__set_bit(0, context_asid_map);
+
+		pr_info("ASID allocator using %lu entries\n", num_asids);
+	} else {
+		pr_info("ASID allocator disabled\n");
+	}
+
+	return 0;
+}
+early_initcall(asids_init);
--
2.17.1
