Message-ID: <CALvZod7MZaE52408O6eGNpGGW77xFTyr56YK0F7qjNH1HX98MQ@mail.gmail.com>
Date: Fri, 17 Jul 2020 19:47:50 -0700
From: Shakeel Butt <shakeelb@...gle.com>
To: SeongJae Park <sjpark@...zon.com>
Cc: Andrew Morton <akpm@...ux-foundation.org>,
SeongJae Park <sjpark@...zon.de>, Jonathan.Cameron@...wei.com,
Andrea Arcangeli <aarcange@...hat.com>, acme@...nel.org,
alexander.shishkin@...ux.intel.com, amit@...nel.org,
benh@...nel.crashing.org, brendan.d.gregg@...il.com,
Brendan Higgins <brendanhiggins@...gle.com>,
Qian Cai <cai@....pw>,
Colin Ian King <colin.king@...onical.com>,
Jonathan Corbet <corbet@....net>,
David Hildenbrand <david@...hat.com>, dwmw@...zon.com,
foersleo@...zon.de, Ian Rogers <irogers@...gle.com>,
jolsa@...hat.com, "Kirill A. Shutemov" <kirill@...temov.name>,
mark.rutland@....com, Mel Gorman <mgorman@...e.de>,
Minchan Kim <minchan@...nel.org>,
Ingo Molnar <mingo@...hat.com>, namhyung@...nel.org,
"Peter Zijlstra (Intel)" <peterz@...radead.org>,
Randy Dunlap <rdunlap@...radead.org>,
Rik van Riel <riel@...riel.com>,
David Rientjes <rientjes@...gle.com>,
Steven Rostedt <rostedt@...dmis.org>, rppt@...nel.org,
sblbir@...zon.com, shuah@...nel.org, sj38.park@...il.com,
snu@...zon.de, Vlastimil Babka <vbabka@...e.cz>,
Vladimir Davydov <vdavydov.dev@...il.com>,
Yang Shi <yang.shi@...ux.alibaba.com>,
Huang Ying <ying.huang@...el.com>, linux-damon@...zon.com,
Linux MM <linux-mm@...ck.org>, linux-doc@...r.kernel.org,
LKML <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH v18 02/14] mm: Introduce Data Access MONitor (DAMON)
On Mon, Jul 13, 2020 at 1:43 AM SeongJae Park <sjpark@...zon.com> wrote:
>
> From: SeongJae Park <sjpark@...zon.de>
>
> DAMON is a data access monitoring framework subsystem for the Linux
> kernel. The core mechanisms of DAMON make it
>
> - accurate (the monitoring output is useful enough for DRAM level
> memory management; it might not be appropriate for CPU cache levels,
> though),
> - light-weight (the monitoring overhead is low enough to be applied
> online), and
> - scalable (the upper-bound of the overhead is in constant range
> regardless of the size of target workloads).
>
> Using this framework, therefore, the kernel's memory management
> mechanisms can make advanced decisions. Experimental memory management
> optimization works that incur high data access monitoring overhead
> could be implemented again. In user space, meanwhile, users who have
> special workloads can write personalized applications for a better
> understanding and optimization of their workloads and systems.
>
> This commit implements only the stub for module load/unload, the basic
> data structures, and simple manipulation functions for those
> structures, to keep the commit size small. The core mechanisms of
> DAMON will be implemented one by one in the following commits.
>
> Signed-off-by: SeongJae Park <sjpark@...zon.de>
> Reviewed-by: Leonard Foerster <foersleo@...zon.de>
> Reviewed-by: Varad Gautam <vrd@...zon.de>
> ---
> include/linux/damon.h | 63 ++++++++++++++
> mm/Kconfig | 12 +++
> mm/Makefile | 1 +
> mm/damon.c | 188 ++++++++++++++++++++++++++++++++++++++++++
> 4 files changed, 264 insertions(+)
> create mode 100644 include/linux/damon.h
> create mode 100644 mm/damon.c
>
> diff --git a/include/linux/damon.h b/include/linux/damon.h
> new file mode 100644
> index 000000000000..c8f8c1c41a45
> --- /dev/null
> +++ b/include/linux/damon.h
> @@ -0,0 +1,63 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +/*
> + * DAMON api
> + *
> + * Copyright 2019-2020 Amazon.com, Inc. or its affiliates.
> + *
> + * Author: SeongJae Park <sjpark@...zon.de>
> + */
> +
> +#ifndef _DAMON_H_
> +#define _DAMON_H_
> +
> +#include <linux/random.h>
> +#include <linux/types.h>
> +
> +/**
> + * struct damon_addr_range - Represents an address region of [@start, @end).
> + * @start: Start address of the region (inclusive).
> + * @end: End address of the region (exclusive).
> + */
> +struct damon_addr_range {
> + unsigned long start;
> + unsigned long end;
> +};
> +
> +/**
> + * struct damon_region - Represents a monitoring target region.
> + * @ar: The address range of the region.
> + * @sampling_addr: Address of the sample for the next access check.
> + * @nr_accesses: Access frequency of this region.
> + * @list: List head for siblings.
> + */
> +struct damon_region {
> + struct damon_addr_range ar;
> + unsigned long sampling_addr;
> + unsigned int nr_accesses;
> + struct list_head list;
> +};
> +
> +/**
> + * struct damon_task - Represents a monitoring target task.
> + * @pid: Process id of the task.
> + * @regions_list: Head of the monitoring target regions of this task.
> + * @list: List head for siblings.
> + *
> + * If the monitoring target address space is task independent (e.g., physical
> + * memory address space monitoring), @pid should be '-1'.
> + */
> +struct damon_task {
> + int pid;
Storing and accessing pid like this is racy. Why not save the "struct
pid" after getting the reference? I am still going over the usage,
maybe storing mm_struct would be an even better choice.
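If a refcounted struct pid is what ends up being stored, something like
the below is what I have in mind. Just a rough, untested sketch to
illustrate the idea (names are only illustrative): the reference is
taken once, in the context of the process registering the target, and
dropped on teardown.

struct damon_task {
	struct pid *pid;		/* reference held via find_get_pid() */
	struct list_head regions_list;
	struct list_head list;
};

/* Called from the process context that registers the target. */
static struct damon_task *damon_new_task(int user_pid)
{
	struct damon_task *t;
	struct pid *pid;

	pid = find_get_pid(user_pid);	/* resolves in the caller's pid ns */
	if (!pid)
		return NULL;

	t = kmalloc(sizeof(*t), GFP_KERNEL);
	if (!t) {
		put_pid(pid);
		return NULL;
	}

	t->pid = pid;
	INIT_LIST_HEAD(&t->regions_list);

	return t;
}

static void damon_free_task(struct damon_task *t)
{
	struct damon_region *r, *next;

	damon_for_each_region_safe(r, next, t)
		damon_free_region(r);
	put_pid(t->pid);		/* drop the reference from find_get_pid() */
	kfree(t);
}

With that, damon_get_task_struct() becomes a plain
get_pid_task(t->pid, PIDTYPE_PID), with no find_vpid() call and no pid
namespace concern at lookup time.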
> + struct list_head regions_list;
> + struct list_head list;
> +};
> +
> +/**
> + * struct damon_ctx - Represents a context for each monitoring.
> + * @tasks_list: Head of monitoring target tasks (&damon_task) list.
> + */
> +struct damon_ctx {
> + struct list_head tasks_list; /* 'damon_task' objects */
> +};
> +
> +#endif
> diff --git a/mm/Kconfig b/mm/Kconfig
> index c1acc34c1c35..464e9594dcec 100644
> --- a/mm/Kconfig
> +++ b/mm/Kconfig
> @@ -867,4 +867,16 @@ config ARCH_HAS_HUGEPD
> config MAPPING_DIRTY_HELPERS
> bool
>
> +config DAMON
> + tristate "Data Access Monitor"
> + depends on MMU
> + help
> + This feature allows monitoring the access frequency of each memory
> + region. The information can be useful for performance-centric DRAM
> + level memory management.
> +
> + See https://damonitor.github.io/doc/html/latest-damon/index.html for
> + more information.
> + If unsure, say N.
> +
> endmenu
> diff --git a/mm/Makefile b/mm/Makefile
> index fccd3756b25f..230e545b6e07 100644
> --- a/mm/Makefile
> +++ b/mm/Makefile
> @@ -112,3 +112,4 @@ obj-$(CONFIG_MEMFD_CREATE) += memfd.o
> obj-$(CONFIG_MAPPING_DIRTY_HELPERS) += mapping_dirty_helpers.o
> obj-$(CONFIG_PTDUMP_CORE) += ptdump.o
> obj-$(CONFIG_PAGE_REPORTING) += page_reporting.o
> +obj-$(CONFIG_DAMON) += damon.o
> diff --git a/mm/damon.c b/mm/damon.c
> new file mode 100644
> index 000000000000..5ab13b1c15cf
> --- /dev/null
> +++ b/mm/damon.c
> @@ -0,0 +1,188 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * Data Access Monitor
> + *
> + * Copyright 2019-2020 Amazon.com, Inc. or its affiliates.
> + *
> + * Author: SeongJae Park <sjpark@...zon.de>
> + *
> + * This file is constructed of the following parts:
> + *
> + * - Functions and macros for DAMON data structures
> + * - Functions for the module loading/unloading
> + *
> + * The core parts are not implemented yet.
> + */
> +
> +#define pr_fmt(fmt) "damon: " fmt
> +
> +#include <linux/damon.h>
> +#include <linux/mm.h>
> +#include <linux/module.h>
> +#include <linux/slab.h>
> +
> +/*
> + * Functions and macros for DAMON data structures
> + */
> +
> +#define damon_get_task_struct(t) \
> + (get_pid_task(find_vpid(t->pid), PIDTYPE_PID))
You need at least an RCU read lock around find_vpid(). Also you need to
be careful about the context. If you accept my previous suggestion,
then you just need to do this in the process context which is
registering the pid (no need to worry about the pid namespace).
I am wondering if there should be an interface to register processes
with DAMON using a pidfd instead of an integer pid.
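For the current integer-pid form, the lookup would at minimum need to
look something like the below (again just a sketch):

static struct task_struct *damon_get_task_struct(struct damon_task *t)
{
	struct task_struct *task;

	rcu_read_lock();
	/* find_vpid() must be called under rcu_read_lock(). */
	task = get_pid_task(find_vpid(t->pid), PIDTYPE_PID);
	rcu_read_unlock();

	return task;
}

get_pid_task() takes its own reference on the task_struct, so the
returned pointer stays valid after rcu_read_unlock(); the caller still
has to put_task_struct() it when done.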
> +
> +#define damon_next_region(r) \
> + (container_of(r->list.next, struct damon_region, list))
> +
> +#define damon_prev_region(r) \
> + (container_of(r->list.prev, struct damon_region, list))
> +
> +#define damon_for_each_region(r, t) \
> + list_for_each_entry(r, &t->regions_list, list)
> +
> +#define damon_for_each_region_safe(r, next, t) \
> + list_for_each_entry_safe(r, next, &t->regions_list, list)
> +
> +#define damon_for_each_task(t, ctx) \
> + list_for_each_entry(t, &(ctx)->tasks_list, list)
> +
> +#define damon_for_each_task_safe(t, next, ctx) \
> + list_for_each_entry_safe(t, next, &(ctx)->tasks_list, list)
> +
> +/* Get a random number in [l, r) */
> +#define damon_rand(l, r) (l + prandom_u32() % (r - l))
> +
> +/*
> + * Construct a damon_region struct
> + *
> + * Returns the pointer to the new struct on success, or NULL otherwise
> + */
> +static struct damon_region *damon_new_region(unsigned long start,
> + unsigned long end)
> +{
> + struct damon_region *region;
> +
> + region = kmalloc(sizeof(*region), GFP_KERNEL);
> + if (!region)
> + return NULL;
> +
> + region->ar.start = start;
> + region->ar.end = end;
> + region->nr_accesses = 0;
> + INIT_LIST_HEAD(&region->list);
> +
> + return region;
> +}
> +
> +/*
> + * Add a region between two other regions
> + */
> +static inline void damon_insert_region(struct damon_region *r,
> + struct damon_region *prev, struct damon_region *next)
> +{
> + __list_add(&r->list, &prev->list, &next->list);
> +}
> +
> +static void damon_add_region(struct damon_region *r, struct damon_task *t)
> +{
> + list_add_tail(&r->list, &t->regions_list);
> +}
> +
> +static void damon_del_region(struct damon_region *r)
> +{
> + list_del(&r->list);
> +}
> +
> +static void damon_free_region(struct damon_region *r)
> +{
> + kfree(r);
> +}
> +
> +static void damon_destroy_region(struct damon_region *r)
> +{
> + damon_del_region(r);
> + damon_free_region(r);
> +}
> +
> +/*
> + * Construct a damon_task struct
> + *
> + * Returns the pointer to the new struct on success, or NULL otherwise
> + */
> +static struct damon_task *damon_new_task(int pid)
> +{
> + struct damon_task *t;
> +
> + t = kmalloc(sizeof(*t), GFP_KERNEL);
> + if (!t)
> + return NULL;
> +
> + t->pid = pid;
> + INIT_LIST_HEAD(&t->regions_list);
> +
> + return t;
> +}
> +
> +static void damon_add_task(struct damon_ctx *ctx, struct damon_task *t)
> +{
> + list_add_tail(&t->list, &ctx->tasks_list);
> +}
> +
> +static void damon_del_task(struct damon_task *t)
> +{
> + list_del(&t->list);
> +}
> +
> +static void damon_free_task(struct damon_task *t)
> +{
> + struct damon_region *r, *next;
> +
> + damon_for_each_region_safe(r, next, t)
> + damon_free_region(r);
> + kfree(t);
> +}
> +
> +static void damon_destroy_task(struct damon_task *t)
> +{
> + damon_del_task(t);
> + damon_free_task(t);
> +}
> +
> +static unsigned int nr_damon_tasks(struct damon_ctx *ctx)
> +{
> + struct damon_task *t;
> + unsigned int nr_tasks = 0;
> +
> + damon_for_each_task(t, ctx)
> + nr_tasks++;
> +
> + return nr_tasks;
> +}
> +
> +static unsigned int nr_damon_regions(struct damon_task *t)
> +{
> + struct damon_region *r;
> + unsigned int nr_regions = 0;
> +
> + damon_for_each_region(r, t)
> + nr_regions++;
> +
> + return nr_regions;
> +}
> +
> +/*
> + * Functions for the module loading/unloading
> + */
> +
> +static int __init damon_init(void)
> +{
> + return 0;
> +}
> +
> +static void __exit damon_exit(void)
> +{
> +}
> +
> +module_init(damon_init);
> +module_exit(damon_exit);
> +
> +MODULE_LICENSE("GPL");
> +MODULE_AUTHOR("SeongJae Park <sjpark@...zon.de>");
> +MODULE_DESCRIPTION("DAMON: Data Access MONitor");
> --
> 2.17.1
>