Message-ID: <YKT2vbluMgcu94M6@hirez.programming.kicks-ass.net>
Date: Wed, 19 May 2021 13:30:05 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: Beata Michalska <beata.michalska@....com>
Cc: linux-kernel@...r.kernel.org, mingo@...hat.com,
juri.lelli@...hat.com, vincent.guittot@...aro.org,
valentin.schneider@....com, dietmar.eggemann@....com,
corbet@....net, rdunlap@...radead.org, linux-doc@...r.kernel.org
Subject: Re: [PATCH v4 2/3] sched/topology: Rework CPU capacity asymmetry
detection
Mostly style nits; since I read you're already looking at reworking this
due to other feedback, do with it what you like.
On Mon, May 17, 2021 at 09:23:50AM +0100, Beata Michalska wrote:
> @@ -1989,66 +1989,96 @@ static bool topology_span_sane(struct sched_domain_topology_level *tl,
>
> 	return true;
> }
> +
> +/**
> + * Asym capacity bits
> + */
> +struct asym_cap_data {
> +	struct list_head link;
> +	unsigned long capacity;
> +	struct cpumask *cpu_mask;
> +};
> +
> /*
> + * Set of available CPUs grouped by their corresponding capacities
> + * Each list entry contains a CPU mask reflecting CPUs that share the same
> + * capacity.
> + * The lifespan of data is unlimited.
> */
> +static LIST_HEAD(asym_cap_list);
>
> +/*
> + * Verify whether given CPU at a given topology level belongs to a sched domain
> + * that does span CPUs with different capacities.
> + * Provides sd_flags reflecting the asymmetry scope.
> + */
> +static inline int
> +asym_cpu_capacity_classify(struct sched_domain_topology_level *tl, int cpu)
> +{
> +	int sd_asym_flags = SD_ASYM_CPUCAPACITY | SD_ASYM_CPUCAPACITY_FULL;
> +	const struct cpumask *tl_mask = tl->mask(cpu);
> +	struct asym_cap_data *entry;
> +	int asym_cap_count = 0;
> +
> +	if (list_is_singular(&asym_cap_list))
> +		goto leave;
> +
> +	list_for_each_entry(entry, &asym_cap_list, link) {
> +		if (cpumask_intersects(tl_mask, entry->cpu_mask))
> +			++asym_cap_count;
> +		else
> +			sd_asym_flags &= ~SD_ASYM_CPUCAPACITY_FULL;
> +	}
> +	WARN_ON_ONCE(!asym_cap_count);
> +leave:
> +	return asym_cap_count > 1 ? sd_asym_flags : 0;
> +}
>
>
> -
> +/*
> + * Build-up/update list of CPUs grouped by their capacities
> + */
> +static void asym_cpu_capacity_scan(const struct cpumask *cpu_map)
> +{
> +	struct asym_cap_data *entry, *next;
> +	int cpu;
>
> +	if (!list_empty(&asym_cap_list))
> +		list_for_each_entry(entry, &asym_cap_list, link)
> +			cpumask_clear(entry->cpu_mask);
two nits:
- the if() needs { }: while what follows is strictly a single
  statement, it is multi-line, so coding style requires { }.
- the if() is strictly superfluous; if the list is empty, the
  list_for_each_entry() iteration already doesn't do anything.
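A minimal sketch with both nits applied; with the if() dropped, the
loop body is back to a single line and needs no { } either:

	list_for_each_entry(entry, &asym_cap_list, link)
		cpumask_clear(entry->cpu_mask);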
>
> +	entry = list_first_entry_or_null(&asym_cap_list,
> +			struct asym_cap_data, link);
Please align line-breaks at the most nested '('. Vim can help you do
this with: set cino=(0:0. If you're using that other editor, I'm sure
you can convince it to align properly too :-)
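For illustration, the call above with its continuation aligned at the
opening '(':

	entry = list_first_entry_or_null(&asym_cap_list,
					 struct asym_cap_data, link);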
>
> +	for_each_cpu(cpu, cpu_map) {
> +		unsigned long capacity = arch_scale_cpu_capacity(cpu);
>
> +		if (entry && capacity == entry->capacity)
> +			goto next;
>
> +		list_for_each_entry(entry, &asym_cap_list, link)
> +			if (capacity == entry->capacity)
> +				goto next;
{ } again
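That is, a sketch with the braces the multi-line body wants:

	list_for_each_entry(entry, &asym_cap_list, link) {
		if (capacity == entry->capacity)
			goto next;
	}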
> +
> +		entry = kzalloc(sizeof(*entry) + cpumask_size(), GFP_KERNEL);
> +		if (entry) {
> +			entry->capacity = capacity;
> +			entry->cpu_mask = (struct cpumask *)((char *)entry +
> +					sizeof(*entry));
alignment again
> +			list_add(&entry->link, &asym_cap_list);
> +		}
> +		WARN_ONCE(!entry,
> +			"Failed to allocate memory for capacity asymmetry detection\n");
alignment again
(also, eeew, if this lives, perhaps a find_asym_data(capacity) helper
might make it better:
	if (!entry || entry->capacity != capacity)
		entry = find_asym_data(capacity);
)
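For illustration, one possible shape for such a helper (a find-only
lookup, hypothetical body; returning NULL when no matching capacity
exists keeps the caller's kzalloc() fallback intact):

	static struct asym_cap_data *find_asym_data(unsigned long capacity)
	{
		struct asym_cap_data *entry;

		/* Find the capacity group matching @capacity, if any. */
		list_for_each_entry(entry, &asym_cap_list, link) {
			if (capacity == entry->capacity)
				return entry;
		}

		return NULL;
	}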
> +next:
> +		__cpumask_set_cpu(cpu, entry->cpu_mask);
> +	}
>
> +	list_for_each_entry_safe(entry, next, &asym_cap_list, link) {
> +		if (cpumask_empty(entry->cpu_mask)) {
> +			list_del(&entry->link);
> +			kfree(entry);
> +		}
> +	}
See, this has { }
> +}