lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite for Android: free password hash cracker in your pocket
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <67bf778d592c39d02444825c416c2ed11d2ef4b2.1673610485.git.raghavendra.kt@amd.com>
Date:   Mon, 16 Jan 2023 07:05:34 +0530
From:   Raghavendra K T <raghavendra.kt@....com>
To:     <linux-kernel@...r.kernel.org>, <linux-mm@...ck.org>,
        Ingo Molnar <mingo@...hat.com>,
        Peter Zijlstra <peterz@...radead.org>,
        Juri Lelli <juri.lelli@...hat.com>,
        Vincent Guittot <vincent.guittot@...aro.org>,
        Dietmar Eggemann <dietmar.eggemann@....com>,
        Steven Rostedt <rostedt@...dmis.org>,
        Ben Segall <bsegall@...gle.com>, Mel Gorman <mgorman@...e.de>,
        Daniel Bristot de Oliveira <bristot@...hat.com>,
        "Valentin Schneider" <vschneid@...hat.com>,
        Andrew Morton <akpm@...ux-foundation.org>,
        Matthew Wilcox <willy@...radead.org>,
        Vlastimil Babka <vbabka@...e.cz>,
        "Liam R . Howlett" <Liam.Howlett@...cle.com>,
        Peter Xu <peterx@...hat.com>,
        "David Hildenbrand" <david@...hat.com>,
        xu xin <cgel.zte@...il.com>, Yu Zhao <yuzhao@...gle.com>,
        Colin Cross <ccross@...gle.com>, Arnd Bergmann <arnd@...db.de>,
        Hugh Dickins <hughd@...gle.com>,
        Bharata B Rao <bharata@....com>,
        Disha Talreja <dishaa.talreja@....com>
CC:     Raghavendra K T <raghavendra.kt@....com>
Subject: [RFC PATCH V1 1/1] sched/numa: Enhance vma scanning logic

During NUMA scanning, make sure only relevant VMAs of the
tasks are scanned.

Logic:
1) For the first two scan windows, allow unconditional scanning of VMAs.
2) Store the 4 most recent unique tasks (last 8 bits of their PIDs) that
  accessed the VMA. False negatives in case of collision should be fine here.
3) If more than 4 PIDs exist, assume the task indeed accessed the VMA
 to avoid false negatives.

Co-developed-by: Bharata B Rao <bharata@....com>
(initial patch to store pid information)

Suggested-by: Mel Gorman <mgorman@...hsingularity.net>
Signed-off-by: Bharata B Rao <bharata@....com>
Signed-off-by: Raghavendra K T <raghavendra.kt@....com>
---
 include/linux/mm_types.h |  2 ++
 kernel/sched/fair.c      | 32 ++++++++++++++++++++++++++++++++
 mm/memory.c              | 21 +++++++++++++++++++++
 3 files changed, 55 insertions(+)

diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 500e536796ca..07feae37b8e6 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -506,6 +506,8 @@ struct vm_area_struct {
 	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
 #endif
 	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
+	unsigned int accessing_pids;
+	int next_pid_slot;
 } __randomize_layout;
 
 struct kioctx_table;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e4a0b8bd941c..944d2e3b0b3c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2916,6 +2916,35 @@ static void reset_ptenuma_scan(struct task_struct *p)
 	p->mm->numa_scan_offset = 0;
 }
 
+static bool vma_is_accessed(struct vm_area_struct *vma)
+{
+	int i;
+	bool more_pids_exist;
+	unsigned long pid, max_pids;
+	unsigned long current_pid = current->pid & LAST__PID_MASK;
+
+	max_pids = sizeof(unsigned int) * BITS_PER_BYTE / LAST__PID_SHIFT;
+
+	/* By default we assume >= max_pids exist */
+	more_pids_exist = true;
+
+	if (READ_ONCE(current->mm->numa_scan_seq) < 2)
+		return true;
+
+	for (i = 0; i < max_pids; i++) {
+		pid = (vma->accessing_pids >> i * LAST__PID_SHIFT) &
+			LAST__PID_MASK;
+		if (pid == current_pid)
+			return true;
+		if (pid == 0) {
+			more_pids_exist = false;
+			break;
+		}
+	}
+
+	return more_pids_exist;
+}
+
 /*
  * The expensive part of numa migration is done from task_work context.
  * Triggered from task_tick_numa().
@@ -3015,6 +3044,9 @@ static void task_numa_work(struct callback_head *work)
 		if (!vma_is_accessible(vma))
 			continue;
 
+		if (!vma_is_accessed(vma))
+			continue;
+
 		do {
 			start = max(start, vma->vm_start);
 			end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
diff --git a/mm/memory.c b/mm/memory.c
index 8c8420934d60..fafd78d87a51 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4717,7 +4717,28 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
 	pte_t pte, old_pte;
 	bool was_writable = pte_savedwrite(vmf->orig_pte);
 	int flags = 0;
+	int pid_slot = vma->next_pid_slot;
 
+	int i;
+	unsigned long pid, max_pids;
+	unsigned long current_pid = current->pid & LAST__PID_MASK;
+
+	max_pids = sizeof(unsigned int) * BITS_PER_BYTE / LAST__PID_SHIFT;
+
+	/* Avoid duplicate PID update */
+	for (i = 0; i < max_pids; i++) {
+		pid = (vma->accessing_pids >> i * LAST__PID_SHIFT) &
+			LAST__PID_MASK;
+		if (pid == current_pid)
+			goto skip_update;
+	}
+
+	vma->next_pid_slot = (++pid_slot) % max_pids;
+	vma->accessing_pids &= ~(LAST__PID_MASK << (pid_slot * LAST__PID_SHIFT));
+	vma->accessing_pids |= ((current_pid) <<
+			(pid_slot * LAST__PID_SHIFT));
+
+skip_update:
 	/*
 	 * The "pte" at this point cannot be used safely without
 	 * validation through pte_unmap_same(). It's of NUMA type but
-- 
2.34.1

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ