Message-ID: <1cae4fcc-d276-f66d-c094-35571233d923@arm.com>
Date: Wed, 26 May 2021 20:17:25 +0200
From: Dietmar Eggemann <dietmar.eggemann@....com>
To: Beata Michalska <beata.michalska@....com>
Cc: Valentin Schneider <valentin.schneider@....com>,
linux-kernel@...r.kernel.org, peterz@...radead.org,
mingo@...hat.com, juri.lelli@...hat.com,
vincent.guittot@...aro.org, corbet@....net, rdunlap@...radead.org,
linux-doc@...r.kernel.org
Subject: Re: [PATCH v5 2/3] sched/topology: Rework CPU capacity asymmetry
detection
On 26/05/2021 14:15, Beata Michalska wrote:
> On Wed, May 26, 2021 at 11:52:25AM +0200, Dietmar Eggemann wrote:
>> On 25/05/2021 12:29, Beata Michalska wrote:
>>> On Tue, May 25, 2021 at 10:53:07AM +0100, Valentin Schneider wrote:
>>>> On 24/05/21 23:55, Beata Michalska wrote:
>>>>> On Mon, May 24, 2021 at 07:01:04PM +0100, Valentin Schneider wrote:
>>>>>> On 24/05/21 11:16, Beata Michalska wrote:
[...]
>> static inline int
>> asym_cpu_capacity_classify(struct sched_domain *sd,
>> 			   const struct cpumask *cpu_map)
>> {
>> 	int sd_span_match = 0, cpu_map_match = 0, flags = 0;
>> 	struct asym_cap_data *entry;
>>
>> 	list_for_each_entry(entry, &asym_cap_list, link) {
>> 		if (cpumask_intersects(sched_domain_span(sd), entry->cpu_mask))
>> 			++sd_span_match;
>> 		else if (cpumask_intersects(cpu_map, entry->cpu_mask))
>> 			++cpu_map_match;
>> 	}
>>
>> 	WARN_ON_ONCE(!sd_span_match);
>>
>> 	if (sd_span_match > 1) {
>> 		flags |= SD_ASYM_CPUCAPACITY;
>> 		if (!cpu_map_match)
>> 			flags |= SD_ASYM_CPUCAPACITY_FULL;
>> 	}
>>
>> 	return flags;
>> }
> So I planned to drop the list_is_singular check as it is needless really.
> Otherwise, I am not really convinced by the suggestion. I could add comments
> around the current version to make it more ... 'digestible', but I'd rather
> stay with it as it seems more compact to me (subjective).
You could pass in `const struct cpumask *sd_span` instead of `struct
sched_domain *sd`, though, to make it clear that both masks are compared
against the cpumasks of the asym_cap_list entries.
 static inline int
-asym_cpu_capacity_classify(struct sched_domain *sd,
+asym_cpu_capacity_classify(const struct cpumask *sd_span,
 			   const struct cpumask *cpu_map)
 {
 	int sd_asym_flags = SD_ASYM_CPUCAPACITY | SD_ASYM_CPUCAPACITY_FULL;
@@ -1377,14 +1378,14 @@ asym_cpu_capacity_classify(struct sched_domain *sd,
 		goto leave;
 
 	list_for_each_entry(entry, &asym_cap_list, link) {
-		if (cpumask_intersects(sched_domain_span(sd), entry->cpu_mask)) {
+		if (cpumask_intersects(sd_span, entry->cpu_mask)) {
 			++asym_cap_count;
 		} else {
 			/*
 			 * CPUs with given capacity might be offline
 			 * so make sure this is not the case
 			 */
-			if (cpumask_intersects(entry->cpu_mask, cpu_map)) {
+			if (cpumask_intersects(cpu_map, entry->cpu_mask)) {
 				sd_asym_flags &= ~SD_ASYM_CPUCAPACITY_FULL;
 				if (asym_cap_count > 1)
 					break;
@@ -1395,7 +1396,6 @@ asym_cpu_capacity_classify(struct sched_domain *sd,
 leave:
 	return asym_cap_count > 1 ? sd_asym_flags : 0;
 }
-#endif
 
 static inline struct asym_cap_data *
 asym_cpu_capacity_get_data(unsigned long capacity)
@@ -1589,6 +1589,7 @@ sd_init(struct sched_domain_topology_level *tl,
 	struct sd_data *sdd = &tl->data;
 	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
 	int sd_id, sd_weight, sd_flags = 0;
+	struct cpumask *sd_span;
 
 #ifdef CONFIG_NUMA
 	/*
@@ -1636,10 +1637,11 @@ sd_init(struct sched_domain_topology_level *tl,
 #endif
 	};
 
-	cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
-	sd_id = cpumask_first(sched_domain_span(sd));
+	sd_span = sched_domain_span(sd);
+	cpumask_and(sd_span, cpu_map, tl->mask(cpu));
+	sd_id = cpumask_first(sd_span);
 
-	sd->flags |= asym_cpu_capacity_classify(sd, cpu_map);
+	sd->flags |= asym_cpu_capacity_classify(sd_span, cpu_map);
 	/*
 	 * Convert topological properties into behaviour.
 	 */
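
For reference, with the diff above applied the classify helper would look
roughly like this. The local declarations and the list_is_singular() bail-out
are not visible in the hunks, so treat them as reconstructed from the
surrounding context rather than verbatim:

static inline int
asym_cpu_capacity_classify(const struct cpumask *sd_span,
			   const struct cpumask *cpu_map)
{
	int sd_asym_flags = SD_ASYM_CPUCAPACITY | SD_ASYM_CPUCAPACITY_FULL;
	struct asym_cap_data *entry;
	int asym_cap_count = 0;

	/* Only one capacity value system-wide: nothing asymmetric to report. */
	if (list_is_singular(&asym_cap_list))
		goto leave;

	list_for_each_entry(entry, &asym_cap_list, link) {
		if (cpumask_intersects(sd_span, entry->cpu_mask)) {
			++asym_cap_count;
		} else {
			/*
			 * CPUs with given capacity might be offline
			 * so make sure this is not the case
			 */
			if (cpumask_intersects(cpu_map, entry->cpu_mask)) {
				sd_asym_flags &= ~SD_ASYM_CPUCAPACITY_FULL;
				if (asym_cap_count > 1)
					break;
			}
		}
	}

leave:
	return asym_cap_count > 1 ? sd_asym_flags : 0;
}

That way sd_init() fetches sched_domain_span(sd) once and hands the same mask
to cpumask_and(), cpumask_first() and the classify helper.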