Message-ID: <91ec5b74-2fda-4aa8-bc87-c8e99f046391@quicinc.com>
Date: Thu, 23 Jan 2025 16:05:59 +0530
From: Raviteja Laggyshetty <quic_rlaggysh@...cinc.com>
To: Bjorn Andersson <andersson@...nel.org>
CC: Georgi Djakov <djakov@...nel.org>, Rob Herring <robh@...nel.org>,
Krzysztof Kozlowski <krzk+dt@...nel.org>,
Conor Dooley <conor+dt@...nel.org>,
Konrad Dybcio <konradybcio@...nel.org>,
Odelu Kukatla <quic_okukatla@...cinc.com>,
Mike Tipton <mdtipton@...cinc.com>,
"Vivek Aknurwar" <viveka@...cinc.com>,
Sibi Sankar <quic_sibis@...cinc.com>, <linux-arm-msm@...r.kernel.org>,
<linux-pm@...r.kernel.org>, <devicetree@...r.kernel.org>,
<linux-kernel@...r.kernel.org>
Subject: Re: [PATCH V7 1/5] interconnect: core: Add dynamic id allocation support

On 1/12/2025 2:27 AM, Bjorn Andersson wrote:
> On Sat, Jan 11, 2025 at 04:14:25PM +0000, Raviteja Laggyshetty wrote:
>> The current interconnect framework is based on static IDs for creating
>> nodes and registering them with the framework. This becomes a limitation
>> for topologies with multiple instances of the same interconnect provider.
>> Add an icc_node_create_alloc_id() API to create an icc node with a
>> dynamically allocated id; this removes the dependency on static IDs.
>>
>> Signed-off-by: Raviteja Laggyshetty <quic_rlaggysh@...cinc.com>
>> ---
>>  drivers/interconnect/core.c           | 32 +++++++++++++++++++++++++++
>>  include/linux/interconnect-provider.h |  6 +++++
>>  2 files changed, 38 insertions(+)
>>
>> diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
>> index 9d5404a07e8a..0b7093eb51af 100644
>> --- a/drivers/interconnect/core.c
>> +++ b/drivers/interconnect/core.c
>> @@ -858,6 +858,38 @@ struct icc_node *icc_node_create(int id)
>> }
>> EXPORT_SYMBOL_GPL(icc_node_create);
>>
>> +/**
>> + * icc_node_create_alloc_id() - create node and dynamically allocate id
>> + * @start_id: min id to be allocated
>> + *
>> + * Return: icc_node pointer on success, or ERR_PTR() on error
>> + */
>> +struct icc_node *icc_node_create_alloc_id(int start_id)
>
> By having clients pass in start_id, you distribute the decision of what
> a "good number" is across multiple parts of the system (or you have
> clients relying on getting [start_id, start_id + N) back).
>
> Wouldn't it be better to hide that choice in one place (inside the icc
> framework)?
>
Yes, in line with Dmitry's suggestion, I will move start_id into the
framework, and all dynamic allocations will start from 10000.
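
Something along these lines (rough, untested sketch; ICC_DYN_ID_START and
icc_node_create_dyn() are just placeholder names for illustration):

#define ICC_DYN_ID_START	10000

struct icc_node *icc_node_create_dyn(void)
{
	struct icc_node *node;
	int id;

	/*
	 * Allocate outside the lock so an allocation failure cannot
	 * return with icc_lock still held.
	 */
	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&icc_lock);

	/*
	 * Dynamic ids always start from ICC_DYN_ID_START; an upper
	 * bound of 0 means no limit.
	 */
	id = idr_alloc(&icc_idr, node, ICC_DYN_ID_START, 0, GFP_KERNEL);
	if (id < 0) {
		mutex_unlock(&icc_lock);
		kfree(node);
		return ERR_PTR(id);
	}

	node->id = id;
	mutex_unlock(&icc_lock);

	return node;
}
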
> Regards,
> Bjorn
>
>> +{
>> +	struct icc_node *node;
>> +	int id;
>> +
>> +	node = kzalloc(sizeof(*node), GFP_KERNEL);
>> +	if (!node)
>> +		return ERR_PTR(-ENOMEM);
>> +
>> +	mutex_lock(&icc_lock);
>> +
>> +	id = idr_alloc(&icc_idr, node, start_id, 0, GFP_KERNEL);
>> +	if (id < 0) {
>> +		WARN(1, "%s: couldn't get idr\n", __func__);
>> +		kfree(node);
>> +		node = ERR_PTR(id);
>> +		goto out;
>> +	}
>> +	node->id = id;
>> +out:
>> +	mutex_unlock(&icc_lock);
>> +
>> +	return node;
>> +}
>> +EXPORT_SYMBOL_GPL(icc_node_create_alloc_id);
>> +
>> /**
>> * icc_node_destroy() - destroy a node
>> * @id: node id
>> diff --git a/include/linux/interconnect-provider.h b/include/linux/interconnect-provider.h
>> index f5aef8784692..4fc7a5884374 100644
>> --- a/include/linux/interconnect-provider.h
>> +++ b/include/linux/interconnect-provider.h
>> @@ -117,6 +117,7 @@ struct icc_node {
>> int icc_std_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
>> u32 peak_bw, u32 *agg_avg, u32 *agg_peak);
>> struct icc_node *icc_node_create(int id);
>> +struct icc_node *icc_node_create_alloc_id(int start_id);
>> void icc_node_destroy(int id);
>> int icc_link_create(struct icc_node *node, const int dst_id);
>> void icc_node_add(struct icc_node *node, struct icc_provider *provider);
>> @@ -141,6 +142,11 @@ static inline struct icc_node *icc_node_create(int id)
>> 	return ERR_PTR(-ENOTSUPP);
>> }
>>
>> +static inline struct icc_node *icc_node_create_alloc_id(int start_id)
>> +{
>> +	return ERR_PTR(-EOPNOTSUPP);
>> +}
>> +
>> static inline void icc_node_destroy(int id)
>> {
>> }
>> --
>> 2.39.2
>>