[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20220922130859.337985-1-przemyslaw.kitszel@intel.com>
Date: Thu, 22 Sep 2022 15:08:59 +0200
From: Przemek Kitszel <przemyslaw.kitszel@...el.com>
To: Michal Wilczynski <michal.wilczynski@...el.com>
Cc: Przemek Kitszel <przemyslaw.kitszel@...el.com>,
netdev@...r.kernel.org, alexandr.lobakin@...el.com,
dchumak@...dia.com, maximmi@...dia.com, jiri@...nulli.us,
simon.horman@...igine.com, jacob.e.keller@...el.com,
jesse.brandeburg@...el.com
Subject: Re: [RFC PATCH net-next v4 4/6] ice: Implement devlink-rate API
From: Michal Wilczynski <michal.wilczynski@...el.com>
Date: Thu, 15 Sep 2022 15:42:37 +0200
> There is a need to support modification of the Tx scheduler topology in the
> ice driver. This will allow the user to control Tx settings of each node in
> the internal hierarchy of nodes. A number of parameters are supported for
> each node: tx_max, tx_share, tx_priority and tx_weight.
>
> Signed-off-by: Michal Wilczynski <michal.wilczynski@...el.com>
> ---
> drivers/net/ethernet/intel/ice/ice_devlink.c | 511 +++++++++++++++++++
> drivers/net/ethernet/intel/ice/ice_devlink.h | 2 +
> 2 files changed, 513 insertions(+)
>
> diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
> index e6ec20079ced..925283605b59 100644
> --- a/drivers/net/ethernet/intel/ice/ice_devlink.c
> +++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
> @@ -713,6 +713,490 @@ ice_devlink_port_unsplit(struct devlink *devlink, struct devlink_port *port,
> return ice_devlink_port_split(devlink, port, 1, extack);
> }
>
> +/**
> + * ice_traverse_tx_tree - traverse Tx scheduler tree
> + * @devlink: devlink struct
> + * @node: current node, used for recursion
> + * @tc_node: tc_node struct, that is treated as a root
> + * @pf: pf struct
> + *
> + * This function traverses Tx scheduler tree and exports
> + * entire structure to the devlink-rate.
> + */
> +static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node *node,
> + struct ice_sched_node *tc_node, struct ice_pf *pf)
> +{
> + struct ice_vf *vf;
> + int i;
> +
> + devl_lock(devlink);
> +
> + if (node->parent == tc_node) {
> + /* create root node */
> + devl_rate_node_create(devlink, node, node->name, NULL);
> + } else if (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF &&
> + node->parent->name) {
> + devl_rate_queue_create(devlink, node->parent->name, node->tx_queue_id, node);
> + } else if (node->vsi_handle &&
> + pf->vsi[node->vsi_handle]->vf) {
> + vf = pf->vsi[node->vsi_handle]->vf;
> + snprintf(node->name, DEVLINK_RATE_NAME_MAX_LEN, "vport_%u", vf->devlink_port.index);
> + if (!vf->devlink_port.devlink_rate)
> + devl_rate_vport_create(&vf->devlink_port, node, node->parent->name);
> + } else {
> + devl_rate_node_create(devlink, node, node->name, node->parent->name);
> + }
> +
> + devl_unlock(devlink);
I would move the devlink locking into ice_devlink_rate_init_tx_topology(),
so the lock would be taken only once for the whole tree walk.
> +
> + for (i = 0; i < node->num_children; i++)
> + ice_traverse_tx_tree(devlink, node->children[i], tc_node, pf);
> +}
> +
> +/**
> + * ice_devlink_rate_init_tx_topology - export Tx scheduler tree to devlink rate
> + * @devlink: devlink struct
> + * @vsi: main vsi struct
> + *
> + * This function finds a root node, then calls ice_traverse_tx_tree(), which
> + * traverses the tree and exports its contents to devlink rate.
> + */
> +int ice_devlink_rate_init_tx_topology(struct devlink *devlink, struct ice_vsi *vsi)
> +{
> + struct ice_port_info *pi = vsi->port_info;
> + struct ice_sched_node *tc_node;
> + struct ice_pf *pf = vsi->back;
> + int i;
> +
> + tc_node = pi->root->children[0];
> + mutex_lock(&pi->sched_lock);
> + for (i = 0; i < tc_node->num_children; i++)
> + ice_traverse_tx_tree(devlink, tc_node->children[i], tc_node, pf);
> + mutex_unlock(&pi->sched_lock);
> +
> + return 0;
> +}
// snip
> static int
> @@ -893,6 +1399,9 @@ void ice_devlink_register(struct ice_pf *pf)
> */
> void ice_devlink_unregister(struct ice_pf *pf)
> {
> + devl_lock(priv_to_devlink(pf));
> + devl_rate_objects_destroy(priv_to_devlink(pf));
> + devl_unlock(priv_to_devlink(pf));
> devlink_unregister(priv_to_devlink(pf));
> }
>
Maybe it's now worth introducing a local variable for priv_to_devlink(pf)?
[...]
--Przemek
Powered by blists - more mailing lists