Message-Id: <20200221043052.3305-1-sultan@kerneltoast.com>
Date: Thu, 20 Feb 2020 20:30:52 -0800
From: Sultan Alsawaf <sultan@...neltoast.com>
To: unlisted-recipients:; (no To-header on input)
Cc: Sultan Alsawaf <sultan@...neltoast.com>,
Andrew Morton <akpm@...ux-foundation.org>, linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Subject: [PATCH v2] mm: Stop kswapd early when nothing's waiting for it to free pages
From: Sultan Alsawaf <sultan@...neltoast.com>

Keeping kswapd running after all of the failed allocations that woke it
have been satisfied incurs significant overhead: pages are evicted and
written back unnecessarily, and spurious VM pressure events are sent to
every registered shrinker. Once kswapd no longer needs to do any work
for an allocation to succeed, stop it early to save resources.

Signed-off-by: Sultan Alsawaf <sultan@...neltoast.com>
---
include/linux/mmzone.h | 1 +
mm/page_alloc.c | 17 ++++++++++++++---
mm/vmscan.c | 3 ++-
3 files changed, 17 insertions(+), 4 deletions(-)
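
Note for reviewers, kept below the cut line so it stays out of the
commit message: the change amounts to a refcount-style waiter count.
Every allocation that wakes kswapd holds a reference on
pgdat->kswapd_waiters for the duration of the slowpath, and kswapd's
rebalance loop bails out as soon as the count drops to zero instead of
reclaiming all the way up to the high watermark. The userspace sketch
below mirrors only that pattern; every name in it (reclaimer,
alloc_slowpath, the sleep intervals) is invented for illustration and
is not kernel code.

/* Sketch only; build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int waiters;      /* analogue of pgdat->kswapd_waiters */
static atomic_bool should_stop; /* analogue of kthread_should_stop() */

/* Background reclaimer: works only while an allocation is waiting. */
static void *reclaimer(void *unused)
{
	(void)unused;
	while (!atomic_load(&should_stop)) {
		if (atomic_load(&waiters) == 0) {
			/* Nobody is waiting; sleep instead of reclaiming more. */
			usleep(1000);
			continue;
		}
		puts("reclaiming one batch");
		usleep(1000);
	}
	return NULL;
}

/* Slowpath allocator: holds a waiter reference across the retry loop. */
static bool alloc_slowpath(void)
{
	atomic_fetch_add(&waiters, 1);	/* like atomic_inc() before wake_all_kswapds() */
	usleep(5000);			/* stand-in for the retry/compaction loop */
	atomic_fetch_sub(&waiters, 1);	/* like the atomic_dec() at got_pg */
	return true;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, reclaimer, NULL);
	alloc_slowpath();
	atomic_store(&should_stop, true);
	pthread_join(t, NULL);
	return 0;
}

As in the patch, the count is advisory: the reclaimer still honors its
own stop conditions (should_stop here; try_to_freeze() and
kthread_should_stop() in balance_pgdat()).
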
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 462f6873905a..23861cdaab7f 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -735,6 +735,7 @@ typedef struct pglist_data {
unsigned long node_spanned_pages; /* total size of physical page
range, including holes */
int node_id;
+ atomic_t kswapd_waiters;
wait_queue_head_t kswapd_wait;
wait_queue_head_t pfmemalloc_wait;
struct task_struct *kswapd; /* Protected by
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3c4eb750a199..923b994c38c8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4401,6 +4401,8 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
int no_progress_loops;
unsigned int cpuset_mems_cookie;
int reserve_flags;
+ pg_data_t *pgdat = ac->preferred_zoneref->zone->zone_pgdat;
+ bool woke_kswapd = false;

/*
* We also sanity check to catch abuse of atomic reserves being used by
@@ -4434,8 +4436,13 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
if (!ac->preferred_zoneref->zone)
goto nopage;
- if (alloc_flags & ALLOC_KSWAPD)
+ if (alloc_flags & ALLOC_KSWAPD) {
+ if (!woke_kswapd) {
+ atomic_inc(&pgdat->kswapd_waiters);
+ woke_kswapd = true;
+ }
wake_all_kswapds(order, gfp_mask, ac);
+ }

/*
* The adjusted alloc_flags might result in immediate success, so try
@@ -4640,9 +4647,12 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
goto retry;
}
fail:
- warn_alloc(gfp_mask, ac->nodemask,
- "page allocation failure: order:%u", order);
got_pg:
+ if (woke_kswapd)
+ atomic_dec(&pgdat->kswapd_waiters);
+ if (!page)
+ warn_alloc(gfp_mask, ac->nodemask,
+ "page allocation failure: order:%u", order);
return page;
}
@@ -6711,6 +6721,7 @@ static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
pgdat_page_ext_init(pgdat);
spin_lock_init(&pgdat->lru_lock);
lruvec_init(&pgdat->__lruvec);
+ pgdat->kswapd_waiters = (atomic_t)ATOMIC_INIT(0);
}
static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid,
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c05eb9efec07..59d9f3dd14f6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3694,7 +3694,8 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
__fs_reclaim_release();
ret = try_to_freeze();
__fs_reclaim_acquire();
- if (ret || kthread_should_stop())
+ if (ret || kthread_should_stop() ||
+ !atomic_read(&pgdat->kswapd_waiters))
break;

/*
--
2.25.1