Message-Id: <20220208081902.3550911-11-yuzhao@google.com>
Date: Tue, 8 Feb 2022 01:19:00 -0700
From: Yu Zhao <yuzhao@...gle.com>
To: Andrew Morton <akpm@...ux-foundation.org>,
	Johannes Weiner <hannes@...xchg.org>,
	Mel Gorman <mgorman@...e.de>,
	Michal Hocko <mhocko@...nel.org>
Cc: Andi Kleen <ak@...ux.intel.com>,
	Aneesh Kumar <aneesh.kumar@...ux.ibm.com>,
	Barry Song <21cnbao@...il.com>,
	Catalin Marinas <catalin.marinas@....com>,
	Dave Hansen <dave.hansen@...ux.intel.com>,
	Hillf Danton <hdanton@...a.com>,
	Jens Axboe <axboe@...nel.dk>,
	Jesse Barnes <jsbarnes@...gle.com>,
	Jonathan Corbet <corbet@....net>,
	Linus Torvalds <torvalds@...ux-foundation.org>,
	Matthew Wilcox <willy@...radead.org>,
	Michael Larabel <Michael@...haellarabel.com>,
	Mike Rapoport <rppt@...nel.org>,
	Rik van Riel <riel@...riel.com>,
	Vlastimil Babka <vbabka@...e.cz>,
	Will Deacon <will@...nel.org>,
	Ying Huang <ying.huang@...el.com>,
	linux-arm-kernel@...ts.infradead.org,
	linux-doc@...r.kernel.org,
	linux-kernel@...r.kernel.org,
	linux-mm@...ck.org,
	page-reclaim@...gle.com,
	x86@...nel.org,
	Yu Zhao <yuzhao@...gle.com>,
	Brian Geffon <bgeffon@...gle.com>,
	Jan Alexander Steffens <heftig@...hlinux.org>,
	Oleksandr Natalenko <oleksandr@...alenko.name>,
	Steven Barrett <steven@...uorix.net>,
	Suleiman Souhlal <suleiman@...gle.com>,
	Daniel Byrne <djbyrne@....edu>,
	Donald Carr <d@...os-reins.com>,
	"Holger Hoffstätte" <holger@...lied-asynchrony.com>,
	Konstantin Kharlamov <Hi-Angel@...dex.ru>,
	Shuang Zhai <szhai2@...rochester.edu>,
	Sofia Trinh <sofia.trinh@....works>
Subject: [PATCH v7 10/12] mm: multigenerational LRU: thrashing prevention

Add /sys/kernel/mm/lru_gen/min_ttl_ms for thrashing prevention, as
requested by many desktop users [1].

When set to value N, it prevents the working set of N milliseconds
from getting evicted. The OOM killer is triggered if this working set
can't be kept in memory. Based on the average human detectable lag
(~100ms), N=1000 usually eliminates intolerable lags due to thrashing.
Larger values like N=3000 make lags less noticeable at the risk of
premature OOM kills.

Compared with the size-based approach, e.g., [2], this time-based
approach has the following advantages:
1) It's easier to configure because it's agnostic to applications and
   memory sizes.
2) It's more reliable because it's directly wired to the OOM killer.

[1] https://lore.kernel.org/lkml/Ydza%2FzXKY9ATRoh6@google.com/
[2] https://lore.kernel.org/lkml/20211130201652.2218636d@mail.inbox.lv/

Signed-off-by: Yu Zhao <yuzhao@...gle.com>
Acked-by: Brian Geffon <bgeffon@...gle.com>
Acked-by: Jan Alexander Steffens (heftig) <heftig@...hlinux.org>
Acked-by: Oleksandr Natalenko <oleksandr@...alenko.name>
Acked-by: Steven Barrett <steven@...uorix.net>
Acked-by: Suleiman Souhlal <suleiman@...gle.com>
Tested-by: Daniel Byrne <djbyrne@....edu>
Tested-by: Donald Carr <d@...os-reins.com>
Tested-by: Holger Hoffstätte <holger@...lied-asynchrony.com>
Tested-by: Konstantin Kharlamov <Hi-Angel@...dex.ru>
Tested-by: Shuang Zhai <szhai2@...rochester.edu>
Tested-by: Sofia Trinh <sofia.trinh@....works>
---
 mm/vmscan.c | 64 +++++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 60 insertions(+), 4 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 700c35f2a030..4d37d63668b5 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4335,7 +4335,8 @@ static long get_nr_evictable(struct lruvec *lruvec, unsigned long max_seq,
 	return total > 0 ? total : 0;
 }
 
-static void age_lruvec(struct lruvec *lruvec, struct scan_control *sc)
+static bool age_lruvec(struct lruvec *lruvec, struct scan_control *sc,
+		       unsigned long min_ttl)
 {
 	bool need_aging;
 	long nr_to_scan;
@@ -4344,14 +4345,22 @@ static void age_lruvec(struct lruvec *lruvec, struct scan_control *sc)
 	DEFINE_MAX_SEQ(lruvec);
 	DEFINE_MIN_SEQ(lruvec);
 
+	if (min_ttl) {
+		int gen = lru_gen_from_seq(min_seq[TYPE_FILE]);
+		unsigned long birth = READ_ONCE(lruvec->lrugen.timestamps[gen]);
+
+		if (time_is_after_jiffies(birth + min_ttl))
+			return false;
+	}
+
 	mem_cgroup_calculate_protection(NULL, memcg);
 
 	if (mem_cgroup_below_min(memcg))
-		return;
+		return false;
 
 	nr_to_scan = get_nr_evictable(lruvec, max_seq, min_seq, swappiness, &need_aging);
 	if (!nr_to_scan)
-		return;
+		return false;
 
 	nr_to_scan >>= sc->priority;
 
@@ -4360,11 +4369,18 @@ static void age_lruvec(struct lruvec *lruvec, struct scan_control *sc)
 	if (nr_to_scan && need_aging && (!mem_cgroup_below_low(memcg) ||
 					 sc->memcg_low_reclaim))
 		try_to_inc_max_seq(lruvec, max_seq, sc, swappiness, false);
+
+	return true;
 }
 
+/* to protect the working set of the last N jiffies */
+static unsigned long lru_gen_min_ttl __read_mostly;
+
 static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
 {
 	struct mem_cgroup *memcg;
+	bool success = false;
+	unsigned long min_ttl = READ_ONCE(lru_gen_min_ttl);
 
 	VM_BUG_ON(!current_is_kswapd());
 
@@ -4390,11 +4406,28 @@ static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
 	do {
 		struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
 
-		age_lruvec(lruvec, sc);
+		if (age_lruvec(lruvec, sc, min_ttl))
+			success = true;
 
 		cond_resched();
 	} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
 
+	/*
+	 * The main goal is to OOM kill if every generation from all memcgs is
+	 * younger than min_ttl. However, another theoretical possibility is all
+	 * memcgs are either below min or empty.
+	 */
+	if (!success && mutex_trylock(&oom_lock)) {
+		struct oom_control oc = {
+			.gfp_mask = sc->gfp_mask,
+			.order = sc->order,
+		};
+
+		out_of_memory(&oc);
+
+		mutex_unlock(&oom_lock);
+	}
+
 	current->reclaim_state->mm_walk = NULL;
 }
 
@@ -5107,6 +5140,28 @@ static void lru_gen_change_state(bool enable)
  *                          sysfs interface
  ******************************************************************************/
 
+static ssize_t show_min_ttl(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%u\n", jiffies_to_msecs(READ_ONCE(lru_gen_min_ttl)));
+}
+
+static ssize_t store_min_ttl(struct kobject *kobj, struct kobj_attribute *attr,
+			     const char *buf, size_t len)
+{
+	unsigned int msecs;
+
+	if (kstrtouint(buf, 0, &msecs))
+		return -EINVAL;
+
+	WRITE_ONCE(lru_gen_min_ttl, msecs_to_jiffies(msecs));
+
+	return len;
+}
+
+static struct kobj_attribute lru_gen_min_ttl_attr = __ATTR(
+	min_ttl_ms, 0644, show_min_ttl, store_min_ttl
+);
+
 static ssize_t show_enable(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
 {
 	unsigned int caps = 0;
@@ -5155,6 +5210,7 @@ static struct kobj_attribute lru_gen_enabled_attr = __ATTR(
 );
 
 static struct attribute *lru_gen_attrs[] = {
+	&lru_gen_min_ttl_attr.attr,
 	&lru_gen_enabled_attr.attr,
 	NULL
 };
-- 
2.35.0.263.gb82422642f-goog
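
For anyone who wants to try this out: below is a minimal userspace sketch,
not part of the patch, that applies the suggested N=1000 via the new knob.
It is equivalent to "echo 1000 > /sys/kernel/mm/lru_gen/min_ttl_ms" run as
root; the program structure and error handling are purely illustrative.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* sysfs knob added by this patch; writing requires root */
	const char *path = "/sys/kernel/mm/lru_gen/min_ttl_ms";
	/* protect the working set of the last ~1000 ms */
	const char *val = "1000";
	ssize_t ret;
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return 1;
	}
	ret = write(fd, val, strlen(val));
	if (ret < 0)
		perror("write");
	close(fd);
	return ret < 0;
}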
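
And for readers unfamiliar with the jiffies helpers, a standalone
illustration, again not part of the patch, of the wraparound-safe check
age_lruvec() performs: an lruvec is left alone while the birth of its
oldest file generation plus min_ttl still lies in the future. The helper
name working_set_protected() is made up for this example; the kernel uses
time_is_after_jiffies() directly.

#include <assert.h>
#include <limits.h>

/*
 * Mirrors time_is_after_jiffies(birth + min_ttl): the signed
 * subtraction keeps the comparison correct even after the
 * counter wraps around.
 */
static int working_set_protected(unsigned long now, unsigned long birth,
				 unsigned long min_ttl)
{
	return (long)(now - (birth + min_ttl)) < 0;
}

int main(void)
{
	/* a generation born 500 "jiffies" ago, TTL of 1000: protected */
	assert(working_set_protected(10500, 10000, 1000));
	/* the same generation once it is older than the TTL: evictable */
	assert(!working_set_protected(11200, 10000, 1000));
	/* still correct when the counter has wrapped past ULONG_MAX */
	assert(working_set_protected(5, ULONG_MAX - 100, 1000));
	return 0;
}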