Message-ID: <20221122203850.2765015-2-almasrymina@google.com>
Date: Tue, 22 Nov 2022 12:38:46 -0800
From: Mina Almasry <almasrymina@...gle.com>
To: Huang Ying <ying.huang@...el.com>,
Yang Shi <yang.shi@...ux.alibaba.com>,
Yosry Ahmed <yosryahmed@...gle.com>,
Tim Chen <tim.c.chen@...ux.intel.com>, weixugc@...gle.com,
shakeelb@...gle.com, gthelen@...gle.com, fvdl@...gle.com,
Johannes Weiner <hannes@...xchg.org>,
Michal Hocko <mhocko@...nel.org>,
Roman Gushchin <roman.gushchin@...ux.dev>,
Muchun Song <songmuchun@...edance.com>,
Andrew Morton <akpm@...ux-foundation.org>
Cc: Mina Almasry <almasrymina@...gle.com>,
linux-kernel@...r.kernel.org, cgroups@...r.kernel.org,
linux-mm@...ck.org
Subject: [RFC PATCH v1] mm: Add memory.demote for proactive demotion only
Add the proactive demotion interface memory.demote. This interface can
be used as follows:

echo "1m" > memory.demote

In response to this command the kernel will attempt to demote 1m of
memory from this cgroup. The kernel may not be able to demote the full
amount requested by userspace, in which case -EAGAIN is returned to the
user (similar to memory.reclaim).

With this interface the kernel will only attempt to demote pages. It
will not attempt any other kind of reclaim (swap, writeback, or
reclaiming clean file pages).
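
For illustration only, below is a minimal userspace sketch of how a
proactive-demotion agent might drive this interface. The cgroup path
and the 64m batch size are made up for the example; only the
memory.demote file and its -EAGAIN/-EINTR behavior come from this
patch.

/* Hypothetical example; not part of this patch. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Assumed cgroup path for the workload being demoted. */
	const char *path = "/sys/fs/cgroup/workload/memory.demote";
	const char *request = "64m";	/* arbitrary demotion batch */
	int fd;

	fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/*
	 * The write returns the full count when the requested amount was
	 * demoted. -EAGAIN means the kernel could not demote the full
	 * amount after retrying; -EINTR means a pending signal
	 * interrupted the request.
	 */
	if (write(fd, request, strlen(request)) < 0) {
		if (errno == EAGAIN)
			fprintf(stderr, "could not demote the full amount\n");
		else
			perror("write");
	}

	close(fd);
	return 0;
}

A real agent would presumably size the request from its own estimate of
cold memory in the cgroup rather than a fixed constant.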
Signed-off-by: Mina Almasry <almasrymina@...gle.com>
---
mm/memcontrol.c | 38 ++++++++++++++++++++++++++++++++++++++
mm/vmscan.c | 18 ++++++++++++++----
2 files changed, 52 insertions(+), 4 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index fd4ff1c865a2..427c79e467eb 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6623,6 +6623,39 @@ static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf,
return nbytes;
}
+static ssize_t memory_demote(struct kernfs_open_file *of, char *buf,
+ size_t nbytes, loff_t off)
+{
+ struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
+ unsigned int nr_retries = MAX_RECLAIM_RETRIES;
+ unsigned long nr_to_demote, nr_demoted = 0;
+ unsigned int reclaim_options = MEMCG_RECLAIM_ONLY_DEMOTE;
+ int err;
+
+ buf = strstrip(buf);
+ err = page_counter_memparse(buf, "", &nr_to_demote);
+ if (err)
+ return err;
+
+ while (nr_demoted < nr_to_demote) {
+ unsigned long demoted;
+
+ if (signal_pending(current))
+ return -EINTR;
+
+ demoted = try_to_free_mem_cgroup_pages(
+ memcg, nr_to_demote - nr_demoted, GFP_KERNEL,
+ reclaim_options);
+
+ if (!demoted && !nr_retries--)
+ return -EAGAIN;
+
+ nr_demoted += demoted;
+ }
+
+ return nbytes;
+}
+
static struct cftype memory_files[] = {
{
.name = "current",
@@ -6691,6 +6724,11 @@ static struct cftype memory_files[] = {
.flags = CFTYPE_NS_DELEGATABLE,
.write = memory_reclaim,
},
+ {
+ .name = "demote",
+ .flags = CFTYPE_NS_DELEGATABLE,
+ .write = memory_demote,
+ },
{ } /* terminate */
};
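
Not shown in the hunks quoted here is how MEMCG_RECLAIM_ONLY_DEMOTE,
passed above, becomes the sc->demotion == 2 test in the vmscan.c hunk
below. As a rough, non-authoritative sketch of that connection (the
helper name is invented; only MEMCG_RECLAIM_ONLY_DEMOTE, struct
scan_control, and the value 2 appear in this mail):

/*
 * Sketch only: one way the memcg reclaim flag could be translated into
 * the scan_control field tested in shrink_folio_list(). The real
 * plumbing lives in code not quoted in this mail.
 */
static void set_demotion_mode(struct scan_control *sc,
			      unsigned int reclaim_options)
{
	if (reclaim_options & MEMCG_RECLAIM_ONLY_DEMOTE)
		sc->demotion = 2;	/* demote, but do no other reclaim */
}

With that in place, memory.demote is expected to behave like
memory.reclaim with every pass restricted to demotion.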
diff --git a/mm/vmscan.c b/mm/vmscan.c
index dea05ad8ece5..8c1f5416d789 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1657,12 +1657,13 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
LIST_HEAD(demote_folios);
unsigned int nr_reclaimed = 0;
unsigned int pgactivate = 0;
- bool do_demote_pass;
+ bool do_demote_pass, only_demote_pass;
struct swap_iocb *plug = NULL;
memset(stat, 0, sizeof(*stat));
cond_resched();
do_demote_pass = can_demote(pgdat->node_id, sc);
+ only_demote_pass = sc->demotion == 2;
retry:
while (!list_empty(folio_list)) {
@@ -2091,10 +2092,19 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
nr_reclaimed += demote_folio_list(&demote_folios, pgdat);
/* Folios that could not be demoted are still in @demote_folios */
if (!list_empty(&demote_folios)) {
- /* Folios which weren't demoted go back on @folio_list for retry: */
+ /*
+ * Folios which weren't demoted go back on @folio_list.
+ */
list_splice_init(&demote_folios, folio_list);
- do_demote_pass = false;
- goto retry;
+
+ /*
+ * Retry reclaim on the undemoted folios in folio_list, unless we
+ * are doing a demotion-only pass.
+ */
+ if (!only_demote_pass) {
+ do_demote_pass = false;
+ goto retry;
+ }
}
pgactivate = stat->nr_activate[0] + stat->nr_activate[1];
--
2.38.1.584.g0f3c55d4c2-goog