Message-Id: <20210608115254.11930-4-sj38.park@gmail.com>
Date: Tue, 8 Jun 2021 11:52:43 +0000
From: SeongJae Park <sj38.park@...il.com>
To: akpm@...ux-foundation.org
Cc: SeongJae Park <sjpark@...zon.de>, Jonathan.Cameron@...wei.com,
acme@...nel.org, alexander.shishkin@...ux.intel.com,
amit@...nel.org, benh@...nel.crashing.org,
brendanhiggins@...gle.com, corbet@....net, david@...hat.com,
dwmw@...zon.com, elver@...gle.com, fan.du@...el.com,
foersleo@...zon.de, greg@...ah.com, gthelen@...gle.com,
guoju.fgj@...baba-inc.com, jgowans@...zon.com, mgorman@...e.de,
minchan@...nel.org, mingo@...hat.com, namhyung@...nel.org,
peterz@...radead.org, riel@...riel.com, rientjes@...gle.com,
rostedt@...dmis.org, rppt@...nel.org, shakeelb@...gle.com,
shuah@...nel.org, sj38.park@...il.com, snu@...le79.org,
vbabka@...e.cz, vdavydov.dev@...il.com, zgf574564920@...il.com,
linux-damon@...zon.com, linux-mm@...ck.org,
linux-doc@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: [RFC v2 03/14] damon/core/schemes: Skip already charged targets and regions
From: SeongJae Park <sjpark@...zon.de>
If DAMOS stops applying its action to memory regions because the speed
limit has been reached, it does nothing until the next charge window
starts.  Then, it restarts the work from the beginning of the address
space.  If there is a huge memory region at the beginning of the address
space and it fulfills the scheme's target data access pattern, the
action would therefore be applied only to that region.

This commit mitigates the case by making DAMOS skip, at the beginning of
the current charge window, the memory regions that were already charged
in the previous charge window.
Signed-off-by: SeongJae Park <sjpark@...zon.de>
---
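Note (illustration only, not part of the patch): below is a minimal
userspace sketch of the resume behavior.  It assumes a single target,
made-up region addresses, and a simplified skip check; only the name
charge_addr_from is taken from the patch.  The real code in
damon_do_apply_schemes() additionally matches the remembered target
(charge_target_from) and clears the saved state once the last region of
that target has been visited.

    /* sketch: resume applying the action after the last charged region */
    #include <stdio.h>

    struct region { unsigned long start, end; };

    int main(void)
    {
    	/* regions of a single (assumed) monitoring target */
    	struct region regions[] = {
    		{ 0, 100 }, { 100, 250 }, { 250, 400 }, { 400, 600 },
    	};
    	int nr = sizeof(regions) / sizeof(regions[0]);
    	/* pretend the previous window stopped after charging [0, 250) */
    	unsigned long charge_addr_from = 250;
    	int i;

    	for (i = 0; i < nr; i++) {
    		struct region *r = &regions[i];

    		/* skip regions charged in the previous charge window */
    		if (charge_addr_from && r->start < charge_addr_from)
    			continue;
    		/* resume point reached; stop skipping from here on */
    		charge_addr_from = 0;
    		printf("apply action to [%lu, %lu)\n", r->start, r->end);
    	}
    	return 0;
    }

Running the sketch applies the action only to [250, 400) and
[400, 600), i.e., the regions not charged in the previous window.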
include/linux/damon.h | 5 +++++
mm/damon/core.c | 21 +++++++++++++++++++++
2 files changed, 26 insertions(+)
diff --git a/include/linux/damon.h b/include/linux/damon.h
index 35068b0ece6f..0df81dd2d560 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -104,6 +104,8 @@ struct damos_speed_limit {
/* private: for limit accounting */
unsigned long charged_sz;
unsigned long charged_from;
+ struct damon_target *charge_target_from;
+ unsigned long charge_addr_from;
};
/**
@@ -331,6 +333,9 @@ struct damon_ctx {
#define damon_prev_region(r) \
(container_of(r->list.prev, struct damon_region, list))
+#define damon_last_region(t) \
+ (list_last_entry(&t->regions_list, struct damon_region, list))
+
#define damon_for_each_region(r, t) \
list_for_each_entry(r, &t->regions_list, list)
diff --git a/mm/damon/core.c b/mm/damon/core.c
index 4ac4a9aa2514..5864b63c18f9 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -107,6 +107,8 @@ struct damos *damon_new_scheme(
scheme->limit.ms = limit->ms;
scheme->limit.charged_sz = 0;
scheme->limit.charged_from = 0;
+ scheme->limit.charge_target_from = NULL;
+ scheme->limit.charge_addr_from = 0;
return scheme;
}
@@ -558,6 +560,21 @@ static void damon_do_apply_schemes(struct damon_ctx *c,
if (limit->sz && limit->charged_sz >= limit->sz)
continue;
+ if (limit->charge_target_from) {
+ if (t != limit->charge_target_from)
+ continue;
+ if (r == damon_last_region(t)) {
+ limit->charge_target_from = NULL;
+ limit->charge_addr_from = 0;
+ continue;
+ }
+ if (limit->charge_addr_from &&
+ r->ar.start < limit->charge_addr_from)
+ continue;
+ limit->charge_target_from = NULL;
+ limit->charge_addr_from = 0;
+ }
+
sz = r->ar.end - r->ar.start;
/* Check the target regions condition */
if (sz < s->min_sz_region || s->max_sz_region < sz)
@@ -576,6 +593,10 @@ static void damon_do_apply_schemes(struct damon_ctx *c,
}
c->primitive.apply_scheme(c, t, r, s);
limit->charged_sz += sz;
+ if (limit->sz && limit->charged_sz >= limit->sz) {
+ limit->charge_target_from = t;
+ limit->charge_addr_from = r->ar.end + 1;
+ }
}
if (s->action != DAMOS_STAT)
r->age = 0;
--
2.17.1