[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <26e4698d-fd5e-feae-b9ee-fc3ac35c7a1c@amd.com>
Date: Wed, 14 Jun 2023 14:24:13 -0700
From: Brett Creeley <bcreeley@....com>
To: Dave Ertman <david.m.ertman@...el.com>, intel-wired-lan@...ts.osuosl.org
Cc: daniel.machon@...rochip.com, simon.horman@...igine.com,
netdev@...r.kernel.org
Subject: Re: [PATCH iwl-next v4 04/10] ice: implement lag netdev event handler
On 6/9/2023 2:16 PM, Dave Ertman wrote:
> Caution: This message originated from an External Source. Use proper caution when opening attachments, clicking links, or responding.
>
>
> The event handler for LAG will create a work item to place on the ordered
> workqueue to be processed.
>
> Add in defines for training packets and new recipes to be used by the
> switching block of the HW for LAG packet steering.
>
> Update the ice_lag struct to reflect the new processing methodology.
>
> Signed-off-by: Dave Ertman <david.m.ertman@...el.com>
> ---
> drivers/net/ethernet/intel/ice/ice_lag.c | 125 ++++++++++++++++++++---
> drivers/net/ethernet/intel/ice/ice_lag.h | 30 +++++-
> 2 files changed, 141 insertions(+), 14 deletions(-)
>
> diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
> index 73bfc5cd8b37..529abfb904d0 100644
> --- a/drivers/net/ethernet/intel/ice/ice_lag.c
> +++ b/drivers/net/ethernet/intel/ice/ice_lag.c
[...]
> +/**
> + * ice_lag_process_event - process a task assigned to the lag_wq
> + * @work: pointer to work_struct
> + */
> +static void ice_lag_process_event(struct work_struct *work)
> +{
> + struct netdev_notifier_changeupper_info *info;
> + struct ice_lag_work *lag_work;
> + struct net_device *netdev;
> + struct list_head *tmp, *n;
> + struct ice_pf *pf;
> +
> + lag_work = container_of(work, struct ice_lag_work, lag_task);
> + pf = lag_work->lag->pf;
> +
> + mutex_lock(&pf->lag_mutex);
> + lag_work->lag->netdev_head = &lag_work->netdev_list.node;
> +
> + switch (lag_work->event) {
> + case NETDEV_CHANGEUPPER:
> + info = &lag_work->info.changeupper_info;
> + if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
> + ice_lag_changeupper_event(lag_work->lag, info);
> + break;
> + case NETDEV_BONDING_INFO:
> + ice_lag_info_event(lag_work->lag, &lag_work->info.bonding_info);
> + break;
> + case NETDEV_UNREGISTER:
> + if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG)) {
> + netdev = lag_work->info.bonding_info.info.dev;
> + if ((netdev == lag_work->lag->netdev ||
> + lag_work->lag->primary) && lag_work->lag->bonded)
> + ice_lag_unregister(lag_work->lag, netdev);
> + }
> + break;
> + default:
> + break;
> + }
> +
> + /* cleanup resources allocated for this work item */
> + list_for_each_safe(tmp, n, &lag_work->netdev_list.node) {
> + struct ice_lag_netdev_list *entry;
> +
> + entry = list_entry(tmp, struct ice_lag_netdev_list, node);
> + list_del(&entry->node);
> + kfree(entry);
> + }
> + lag_work->lag->netdev_head = NULL;
> +
> + mutex_unlock(&pf->lag_mutex);
> +
> + kfree(work);
Should this be freeing lag_work instead?
> +}
> +
> /**
> * ice_lag_event_handler - handle LAG events from netdev
> * @notif_blk: notifier block registered by this netdev
> @@ -299,31 +351,79 @@ ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
> void *ptr)
> {
> struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
> + struct net_device *upper_netdev;
> + struct ice_lag_work *lag_work;
> struct ice_lag *lag;
>
> - lag = container_of(notif_blk, struct ice_lag, notif_block);
> + if (!netif_is_ice(netdev))
> + return NOTIFY_DONE;
> +
> + if (event != NETDEV_CHANGEUPPER && event != NETDEV_BONDING_INFO &&
> + event != NETDEV_UNREGISTER)
> + return NOTIFY_DONE;
Would it make more sense to avoid allocating and queueing the work item
(and any related processing) by moving the ice_is_feature_supported(pf,
ICE_F_SRIOV_LAG) check into this function for the events that require that
feature?
Something like:
if ((event == NETDEV_CHANGEUPPER || event == NETDEV_UNREGISTER) &&
!ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
return NOTIFY_DONE;
>
> + if (!(netdev->priv_flags & IFF_BONDING))
> + return NOTIFY_DONE;
> +
> + lag = container_of(notif_blk, struct ice_lag, notif_block);
> if (!lag->netdev)
> return NOTIFY_DONE;
>
> - /* Check that the netdev is in the working namespace */
> if (!net_eq(dev_net(netdev), &init_net))
> return NOTIFY_DONE;
>
> + /* This memory will be freed at the end of ice_lag_process_event */
> + lag_work = kzalloc(sizeof(*lag_work), GFP_KERNEL);
> + if (!lag_work)
> + return -ENOMEM;
> +
> + lag_work->event_netdev = netdev;
> + lag_work->lag = lag;
> + lag_work->event = event;
> + if (event == NETDEV_CHANGEUPPER) {
> + struct netdev_notifier_changeupper_info *info;
> +
> + info = ptr;
> + upper_netdev = info->upper_dev;
> + } else {
> + upper_netdev = netdev_master_upper_dev_get(netdev);
> + }
> +
> + INIT_LIST_HEAD(&lag_work->netdev_list.node);
> + if (upper_netdev) {
> + struct ice_lag_netdev_list *nd_list;
> + struct net_device *tmp_nd;
> +
> + rcu_read_lock();
> + for_each_netdev_in_bond_rcu(upper_netdev, tmp_nd) {
> + nd_list = kzalloc(sizeof(*nd_list), GFP_KERNEL);
> + if (!nd_list)
> + break;
> +
> + nd_list->netdev = tmp_nd;
> + list_add(&nd_list->node, &lag_work->netdev_list.node);
> + }
> + rcu_read_unlock();
> + }
> +
> switch (event) {
> case NETDEV_CHANGEUPPER:
> - ice_lag_changeupper_event(lag, ptr);
> + lag_work->info.changeupper_info =
> + *((struct netdev_notifier_changeupper_info *)ptr);
> break;
> case NETDEV_BONDING_INFO:
> - ice_lag_info_event(lag, ptr);
> - break;
> - case NETDEV_UNREGISTER:
> - ice_lag_unregister(lag, netdev);
> + lag_work->info.bonding_info =
> + *((struct netdev_notifier_bonding_info *)ptr);
> break;
> default:
> + lag_work->info.notifier_info =
> + *((struct netdev_notifier_info *)ptr);
> break;
> }
>
> + INIT_WORK(&lag_work->lag_task, ice_lag_process_event);
> + queue_work(ice_lag_wq, &lag_work->lag_task);
> +
> return NOTIFY_DONE;
> }
>
[...]
Powered by blists - more mailing lists