Message-ID: <20230127101720.qh2wramyfyyucxhx@suse.de>
Date: Fri, 27 Jan 2023 10:17:20 +0000
From: Mel Gorman <mgorman@...e.de>
To: Raghavendra K T <raghavendra.kt@....com>
Cc: linux-kernel@...r.kernel.org, linux-mm@...ck.org,
Ingo Molnar <mingo@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Andrew Morton <akpm@...ux-foundation.org>,
David Hildenbrand <david@...hat.com>,
Bharata B Rao <bharata@....com>,
Disha Talreja <dishaa.talreja@....com>,
Mike Rapoport <rppt@...nel.org>
Subject: Re: [RFC PATCH V1 1/1] sched/numa: Enhance vma scanning logic
On Wed, Jan 25, 2023 at 12:48:16AM +0530, Raghavendra K T wrote:
> Looks like we have to additionally handle numab initialization in the
> vm_area_dup() code path. Something like below fixed it (copy-pasted
> from tty):
>
Yep, it wasn't even boot tested. A better approach is something like this,
still not actually tested:
 include/linux/mm.h       |  9 +++++++++
 include/linux/mm_types.h |  7 +++++++
 kernel/fork.c            |  2 ++
 kernel/sched/fair.c      | 17 +++++++++++++++++
 4 files changed, 35 insertions(+)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 8f857163ac89..481f90dc1983 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -612,6 +612,14 @@ struct vm_operations_struct {
 					  unsigned long addr);
 };
 
+#ifdef CONFIG_NUMA_BALANCING
+#define vma_numab_init(vma) do { (vma)->numab = NULL; } while (0)
+#define vma_numab_free(vma) do { kfree((vma)->numab); } while (0)
+#else
+static inline void vma_numab_init(struct vm_area_struct *vma) {}
+static inline void vma_numab_free(struct vm_area_struct *vma) {}
+#endif /* CONFIG_NUMA_BALANCING */
+
 static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
 {
 	static const struct vm_operations_struct dummy_vm_ops = {};
@@ -620,6 +628,7 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
 	vma->vm_mm = mm;
 	vma->vm_ops = &dummy_vm_ops;
 	INIT_LIST_HEAD(&vma->anon_vma_chain);
+	vma_numab_init(vma);
 }
 
 static inline void vma_set_anonymous(struct vm_area_struct *vma)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 9757067c3053..43ce363d5124 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -526,6 +526,10 @@ struct anon_vma_name {
 	char name[];
 };
 
+struct vma_numab {
+	unsigned long next_scan;
+};
+
 /*
  * This struct describes a virtual memory area. There is one of these
  * per VM-area/task. A VM area is any part of the process virtual memory
@@ -593,6 +597,9 @@ struct vm_area_struct {
 #endif
 #ifdef CONFIG_NUMA
 	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
+#endif
+#ifdef CONFIG_NUMA_BALANCING
+	struct vma_numab *numab;	/* NUMA Balancing state */
 #endif
 	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
 } __randomize_layout;
diff --git a/kernel/fork.c b/kernel/fork.c
index 9f7fe3541897..5a2e8c3cc410 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -474,6 +474,7 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
 		 */
 		*new = data_race(*orig);
 		INIT_LIST_HEAD(&new->anon_vma_chain);
+		vma_numab_init(new);
 		dup_anon_vma_name(orig, new);
 	}
 	return new;
@@ -481,6 +482,7 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
 
 void vm_area_free(struct vm_area_struct *vma)
 {
+	vma_numab_free(vma);
 	free_anon_vma_name(vma);
 	kmem_cache_free(vm_area_cachep, vma);
 }
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c36aa54ae071..6a1cffdfc76b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3027,6 +3027,23 @@ static void task_numa_work(struct callback_head *work)
 		if (!vma_is_accessible(vma))
 			continue;
 
+		/* Initialise new per-VMA NUMAB state. */
+		if (!vma->numab) {
+			vma->numab = kzalloc(sizeof(struct vma_numab), GFP_KERNEL);
+			if (!vma->numab)
+				continue;
+
+			vma->numab->next_scan = now +
+				msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
+		}
+
+		/*
+		 * After the first scan is complete, delay the balancing scan
+		 * for new VMAs.
+		 */
+		if (mm->numa_scan_seq && time_before(jiffies, vma->numab->next_scan))
+			continue;
+
 		do {
 			start = max(start, vma->vm_start);
 			end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
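The lifecycle half of the patch (vma_init() and vm_area_dup() zeroing the
pointer, vm_area_free() releasing it) is the usual lazily-allocated-member
pattern. Below is a compressed userspace model of it; the struct layouts,
the vma_dup() helper and the libc allocators are illustrative stand-ins,
not the kernel code:

#include <stdlib.h>
#include <string.h>

/* Stand-ins for the kernel structures touched by the patch. */
struct vma_numab {
	unsigned long next_scan;	/* "jiffies" after which scanning may start */
};

struct vm_area_struct {
	struct vma_numab *numab;	/* NULL until the scanner first sees the VMA */
};

static void vma_numab_init(struct vm_area_struct *vma)
{
	vma->numab = NULL;		/* state is allocated lazily by the scanner */
}

static void vma_numab_free(struct vm_area_struct *vma)
{
	free(vma->numab);		/* free(NULL) is a no-op, like kfree(NULL) */
}

/*
 * Model of vm_area_dup(): the copy must not inherit the parent's numab
 * pointer, or two VMAs would end up freeing the same allocation. That
 * is the oversight the quoted report above ran into.
 */
static struct vm_area_struct *vma_dup(const struct vm_area_struct *orig)
{
	struct vm_area_struct *new = malloc(sizeof(*new));

	if (new) {
		memcpy(new, orig, sizeof(*new));
		vma_numab_init(new);
	}
	return new;
}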
Powered by blists - more mailing lists
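The scanning half in task_numa_work() then reduces to one gate per VMA.
Again as a userspace sketch reusing the structures above: vma_scan_delayed()
is a made-up helper name, and the plain "now < next_scan" comparison stands
in for the wrap-safe time_before() used in the real code:

#include <stdbool.h>
#include <stdlib.h>

/*
 * Sketch of the gate added to task_numa_work(): allocate per-VMA state
 * on first sight, stamp it with now + scan_delay, and skip the VMA
 * until that deadline passes, but only once the first full pass over
 * the address space (numa_scan_seq > 0) has completed.
 */
static bool vma_scan_delayed(struct vm_area_struct *vma, unsigned long now,
			     unsigned long scan_delay, int numa_scan_seq)
{
	if (!vma->numab) {
		vma->numab = calloc(1, sizeof(*vma->numab));
		if (!vma->numab)
			return true;	/* allocation failed: skip this VMA */

		vma->numab->next_scan = now + scan_delay;
	}

	return numa_scan_seq && now < vma->numab->next_scan;
}

The caller would do "if (vma_scan_delayed(...)) continue;", mirroring the
fair.c hunk. The numa_scan_seq check keeps the delay from applying during
the very first scan of an address space; per the comment in the patch, the
delay is only meant to hold back scanning of VMAs created after that first
pass completes.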