Message-ID: <Zbr/iv3IfVqhOglE@memverge.com>
Date: Wed, 31 Jan 2024 21:18:50 -0500
From: Gregory Price <gregory.price@...verge.com>
To: "Huang, Ying" <ying.huang@...el.com>
Cc: Gregory Price <gourry.memverge@...il.com>, linux-mm@...ck.org,
linux-kernel@...r.kernel.org, linux-doc@...r.kernel.org,
linux-fsdevel@...r.kernel.org, linux-api@...r.kernel.org,
corbet@....net, akpm@...ux-foundation.org, honggyu.kim@...com,
rakie.kim@...com, hyeongtak.ji@...com, mhocko@...nel.org,
vtavarespetr@...ron.com, jgroves@...ron.com,
ravis.opensrc@...ron.com, sthanneeru@...ron.com,
emirakhur@...ron.com, Hasan.Maruf@....com, seungjun.ha@...sung.com,
hannes@...xchg.org, dan.j.williams@...el.com,
Srinivasulu Thanneeru <sthanneeru.opensrc@...ron.com>
Subject: Re: [PATCH v4 3/3] mm/mempolicy: introduce MPOL_WEIGHTED_INTERLEAVE
for weighted interleaving
On Thu, Feb 01, 2024 at 09:55:07AM +0800, Huang, Ying wrote:
> Gregory Price <gregory.price@...verge.com> writes:
> > - u8 __rcu *table, *weights, weight;
> > + u8 __rcu *table, __rcu *weights, weight;
>
> The __rcu usage can be checked with `sparse` directly. For example,
>
> make C=1 mm/mempolicy.o
>
Fixed and squashed; all of the __rcu usage I had, except for the global
pointer, has been cleaned up. Thanks for the reference material, I was
struggling to understand that.
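
For anyone following along, a rough sketch of what sparse checks here
(names are made up for illustration, this is not the actual mempolicy
code):

#include <linux/rcupdate.h>

/*
 * __rcu marks the pointer as living in the RCU address space; sparse
 * (run via `make C=1 mm/mempolicy.o`) warns when such a pointer is
 * loaded or assigned without going through the RCU accessors.
 */
static u8 __rcu *iw_table;	/* hypothetical RCU-protected weight table */

static u8 get_il_weight(int nid)
{
	u8 *table;
	u8 weight;

	rcu_read_lock();
	/* rcu_dereference() strips the __rcu annotation for the caller */
	table = rcu_dereference(iw_table);
	weight = table ? table[nid] : 1;
	rcu_read_unlock();
	return weight;
}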
> > task->mems_allowed_seq protection (added as 4th patch)
> > ------------------------------------------------------
> >
> > + cpuset_mems_cookie = read_mems_allowed_begin();
> > if (!current->il_weight || !node_isset(node, policy->nodes)) {
> > node = next_node_in(node, policy->nodes);
>
> node will be changed in the loop. So we need to change the logic here.
>
New patch below; if it all looks good, I'll ship it in v5.
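
The shape of the protection is the usual mems_allowed seqcount loop;
boiled down, it looks like this (sketch only, helper name made up):

#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/nodemask.h>
#include <linux/sched.h>

/*
 * Sketch: read_mems_allowed_begin() samples current->mems_allowed_seq,
 * and read_mems_allowed_retry() returns true if a concurrent cpuset
 * rebind bumped the seqcount while we were reading policy->nodes, in
 * which case the (possibly torn) read is thrown away and redone.
 */
static unsigned int next_node_stable(struct mempolicy *pol)
{
	unsigned int cpuset_mems_cookie;
	unsigned int nid;

	do {
		cpuset_mems_cookie = read_mems_allowed_begin();
		nid = next_node_in(current->il_prev, pol->nodes);
	} while (read_mems_allowed_retry(cpuset_mems_cookie));

	return nid;
}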
~Gregory
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index d8cc3a577986..4e5a640d10b8 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1878,11 +1878,17 @@ bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone)

static unsigned int weighted_interleave_nodes(struct mempolicy *policy)
{
- unsigned int node = current->il_prev;
-
- if (!current->il_weight || !node_isset(node, policy->nodes)) {
- node = next_node_in(node, policy->nodes);
- /* can only happen if nodemask is being rebound */
+ unsigned int node;
+ unsigned int cpuset_mems_cookie;
+
+retry:
+ /* to prevent miscount use tsk->mems_allowed_seq to detect rebind */
+ cpuset_mems_cookie = read_mems_allowed_begin();
+ if (!current->il_weight ||
+ !node_isset(current->il_prev, policy->nodes)) {
+ node = next_node_in(current->il_prev, policy->nodes);
+ if (read_mems_allowed_retry(cpuset_mems_cookie))
+ goto retry;
if (node == MAX_NUMNODES)
return node;
current->il_prev = node;
@@ -1896,8 +1902,14 @@ static unsigned int weighted_interleave_nodes(struct mempolicy *policy)
static unsigned int interleave_nodes(struct mempolicy *policy)
{
unsigned int nid;
+ unsigned int cpuset_mems_cookie;
+
+ /* to prevent miscount, use tsk->mems_allowed_seq to detect rebind */
+ do {
+ cpuset_mems_cookie = read_mems_allowed_begin();
+ nid = next_node_in(current->il_prev, policy->nodes);
+ } while (read_mems_allowed_retry(cpuset_mems_cookie));

- nid = next_node_in(current->il_prev, policy->nodes);
if (nid < MAX_NUMNODES)
current->il_prev = nid;
return nid;
@@ -2374,6 +2386,7 @@ static unsigned long alloc_pages_bulk_array_weighted_interleave(gfp_t gfp,
struct page **page_array)
{
struct task_struct *me = current;
+ unsigned int cpuset_mems_cookie;
unsigned long total_allocated = 0;
unsigned long nr_allocated = 0;
unsigned long rounds;
@@ -2391,7 +2404,13 @@ static unsigned long alloc_pages_bulk_array_weighted_interleave(gfp_t gfp,
if (!nr_pages)
return 0;

- nnodes = read_once_policy_nodemask(pol, &nodes);
+ /* read the nodes onto the stack, retry if done during rebind */
+ do {
+ cpuset_mems_cookie = read_mems_allowed_begin();
+ nnodes = read_once_policy_nodemask(pol, &nodes);
+ } while (read_mems_allowed_retry(cpuset_mems_cookie));
+
+ /* if the nodemask has become invalid, we cannot do anything */
if (!nnodes)
return 0;