Message-Id: <20191031173721.e2a40b037799a149433a4867@linux-foundation.org>
Date: Thu, 31 Oct 2019 17:37:21 -0700
From: Andrew Morton <akpm@...ux-foundation.org>
To: Shaokun Zhang <zhangshaokun@...ilicon.com>
Cc: <linux-kernel@...r.kernel.org>, yuqi jin <jinyuqi@...wei.com>,
Mike Rapoport <rppt@...ux.ibm.com>,
Paul Burton <paul.burton@...s.com>,
Michal Hocko <mhocko@...e.com>,
Michael Ellerman <mpe@...erman.id.au>,
Anshuman Khandual <anshuman.khandual@....com>
Subject: Re: [PATCH] lib: optimize cpumask_local_spread()
On Thu, 31 Oct 2019 14:03:33 +0800 Shaokun Zhang <zhangshaokun@...ilicon.com> wrote:
> From: yuqi jin <jinyuqi@...wei.com>
>
> In a multi-processor NUMA system, the CPUs serving a device may
> belong to multiple NUMA nodes. When spreading CPUs starting from the
> device's local NUMA node, it is better to pick the node closest to
> the local node next, instead of falling back to any online CPU
> immediately.
>
> For example, the Huawei Kunpeng 920 system has 4 NUMA nodes (0-3)
> across 2 sockets (0-1). If the I/O device is in socket 1 and its
> local NUMA node is node 2, we should choose the non-local node 3 in
> the same socket when node 2 has fewer CPU cores than the I/O
> requirements demand. If we directly pick a CPU from all online ones,
> it may sit in the other socket, which is not friendly for
> performance.
>
> ...
>
> Changes from RFC:
> Address Michal Hocko's comment: Use GFP_ATOMIC instead of GFP_KERNEL
Are you sure this is necessary? cpumask_local_spread() is typically
called when a device driver is initializing irq affinities, and
sleeping allocations are usually OK at driver initialization time. If
there is some driver which is calling cpumask_local_spread() from
atomic context, I bet it's pretty easy to fix.
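For illustration only, assuming no atomic callers actually exist, the
allocation could simply go back to a sleeping one (hypothetical,
untested fragment - the might_sleep() just documents and enforces that
assumption):

	/* Sleeping allocation is fine at driver-init time. */
	might_sleep();
	node_dist = kmalloc_array(nr_node_ids, sizeof(int), GFP_KERNEL);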
> --- a/lib/cpumask.c
> +++ b/lib/cpumask.c
> @@ -192,6 +192,33 @@ void __init free_bootmem_cpumask_var(cpumask_var_t mask)
> }
> #endif
>
> +static void calc_node_distance(int *node_dist, int node)
> +{
> + int i;
> +
> + for (i = 0; i < nr_node_ids; i++)
> + node_dist[i] = node_distance(node, i);
> +}
> +
> +static int find_nearest_node(int *node_dist, bool *used_flag)
The name "used_flag" is rather redundant for a thing of type bool - we
know it's a flag! "used" would suffice.
> +{
> + int i, min_dist = node_dist[0], node_id = -1;
> +
> + for (i = 0; i < nr_node_ids; i++)
> + if (used_flag[i] == 0) {
> + min_dist = node_dist[i];
> + node_id = i;
> + break;
> + }
> + for (i = 0; i < nr_node_ids; i++)
> + if (node_dist[i] < min_dist && used_flag[i] == 0) {
> + min_dist = node_dist[i];
> + node_id = i;
> + }
> +
> + return node_id;
> +}
> +
> /**
> * cpumask_local_spread - select the i'th cpu with local numa cpu's first
> * @i: index number
> @@ -205,7 +232,8 @@ void __init free_bootmem_cpumask_var(cpumask_var_t mask)
> */
> unsigned int cpumask_local_spread(unsigned int i, int node)
Yes, this has become quite an expensive function: the nearest-node
search is O(nr_node_ids^2), plus two allocations per call. That seems
harmless given the typical callsites (drivers setting up irq
affinities at initialization time).
> {
> - int cpu;
> + int cpu, j, id, *node_dist;
> + bool *used_flag;
>
> /* Wrap: we always want a cpu. */
> i %= num_online_cpus();
> @@ -215,19 +243,45 @@ unsigned int cpumask_local_spread(unsigned int i, int node)
> if (i-- == 0)
> return cpu;
> } else {
> - /* NUMA first. */
> - for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask)
> - if (i-- == 0)
> - return cpu;
> + node_dist = kmalloc_array(nr_node_ids, sizeof(int), GFP_ATOMIC);
> + if (!node_dist)
> + for_each_cpu(cpu, cpu_online_mask)
> + if (i-- == 0)
> + return cpu;
>
> - for_each_cpu(cpu, cpu_online_mask) {
> - /* Skip NUMA nodes, done above. */
> - if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
> - continue;
> + used_flag = kmalloc_array(nr_node_ids, sizeof(bool), GFP_ATOMIC);
This could actually be an array of bits (include/linux/bitmap.h), but
it hardly seems important.
In fact with CONFIG_NODES_SHIFT <= 10, such a bitmap would have max
size of 128 bytes and could be a local. But again, this is unimportant
as long as the other kmalloc is in there.
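Untested sketch of what I mean, using the "used" naming from above.
The caller would keep a DECLARE_BITMAP(used, MAX_NUMNODES) on the
stack, bitmap_zero() it, and do set_bit(id, used) instead of
used_flag[id] = 1:

	#include <linux/bitops.h>
	#include <linux/kernel.h>	/* INT_MAX */
	#include <linux/numa.h>		/* NUMA_NO_NODE */

	static int find_nearest_node(int *node_dist, unsigned long *used)
	{
		int i, min_dist = INT_MAX, node_id = NUMA_NO_NODE;

		/* Pick the shortest distance among the unused nodes. */
		for (i = 0; i < nr_node_ids; i++)
			if (!test_bit(i, used) && node_dist[i] < min_dist) {
				min_dist = node_dist[i];
				node_id = i;
			}

		return node_id;
	}

As a bonus, initializing min_dist to INT_MAX folds your two loops into
one, and NUMA_NO_NODE keeps the existing `id < 0` check working.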
> + if (!used_flag)
> + for_each_cpu(cpu, cpu_online_mask)
> + if (i-- == 0) {
> + kfree(node_dist);
> + return cpu;
> + }
> + memset(used_flag, 0, nr_node_ids * sizeof(bool));
>
> - if (i-- == 0)
> - return cpu;
> + calc_node_distance(node_dist, node);
> + for (j = 0; j < nr_node_ids; j++) {
> + id = find_nearest_node(node_dist, used_flag);
> + if (id < 0)
> + break;
> + for_each_cpu_and(cpu,
> + cpumask_of_node(id), cpu_online_mask)
> + if (i-- == 0) {
> + kfree(node_dist);
> + kfree(used_flag);
> + return cpu;
> + }
> + used_flag[id] = 1;
> }
> +
> + for_each_cpu(cpu, cpu_online_mask)
> + if (i-- == 0) {
> + kfree(node_dist);
> + kfree(used_flag);
> + return cpu;
> + }
> +
> + kfree(node_dist);
> + kfree(used_flag);
> }
> BUG();
> }
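Also, the three copies of the kfree+return sequence suggest the
function wants a single exit path. Folding that and the two points
above together, a rough and entirely untested sketch of the whole
thing:

	unsigned int cpumask_local_spread(unsigned int i, int node)
	{
		DECLARE_BITMAP(used, MAX_NUMNODES);
		int cpu, j, id, *node_dist = NULL;

		/* Wrap: we always want a cpu. */
		i %= num_online_cpus();

		if (node == -1)
			goto spread_online;

		node_dist = kmalloc_array(nr_node_ids, sizeof(int),
					  GFP_KERNEL);
		if (!node_dist)
			goto spread_online;	/* plain spread as fallback */

		bitmap_zero(used, nr_node_ids);
		calc_node_distance(node_dist, node);

		/*
		 * Walk the nodes from nearest to farthest; the local node
		 * has the minimum distance to itself, so it comes first.
		 */
		for (j = 0; j < nr_node_ids; j++) {
			id = find_nearest_node(node_dist, used);
			if (id < 0)
				break;
			for_each_cpu_and(cpu, cpumask_of_node(id),
					 cpu_online_mask)
				if (i-- == 0)
					goto out;
			set_bit(id, used);
		}

	spread_online:
		for_each_cpu(cpu, cpu_online_mask)
			if (i-- == 0)
				goto out;
		BUG();
	out:
		kfree(node_dist);	/* kfree(NULL) is a no-op */
		return cpu;
	}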