[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20230816143148.GX22185@unreal>
Date: Wed, 16 Aug 2023 17:31:48 +0300
From: Leon Romanovsky <leon@...nel.org>
To: Przemek Kitszel <przemyslaw.kitszel@...el.com>
Cc: intel-wired-lan@...ts.osuosl.org, netdev@...r.kernel.org,
Tony Nguyen <anthony.l.nguyen@...el.com>,
Mateusz Polchlopek <mateusz.polchlopek@...el.com>,
Jacob Keller <jacob.e.keller@...el.com>,
Jesse Brandeburg <jesse.brandeburg@...el.com>
Subject: Re: [PATCH iwl-next] ice: store VF's pci_dev ptr in ice_vf
On Wed, Aug 16, 2023 at 04:54:54AM -0400, Przemek Kitszel wrote:
> Extend struct ice_vf by vfdev.
> Calculation of vfdev falls more nicely into ice_create_vf_entries().
>
> Caching of vfdev enables simplification of ice_restore_all_vfs_msi_state().
I see that the old code accessed the VF's pci_dev * without any locking
against concurrent PCI core access. How is it protected? How do you make
sure that vfdev is valid?
Generally speaking, it is rarely a good idea to cache VF pci_dev pointers
inside a driver.
Thanks
>
> Reviewed-by: Jesse Brandeburg <jesse.brandeburg@...el.com>
> Reviewed-by: Jacob Keller <jacob.e.keller@...el.com>
> Signed-off-by: Przemek Kitszel <przemyslaw.kitszel@...el.com>
> ---
> add/remove: 0/0 grow/shrink: 2/1 up/down: 157/-130 (27)
> Function old new delta
> ice_sriov_configure 1712 1866 +154
> ice_pci_err_resume 168 171 +3
> ice_restore_all_vfs_msi_state 200 70 -130
> ---
> drivers/net/ethernet/intel/ice/ice_main.c | 2 +-
> drivers/net/ethernet/intel/ice/ice_sriov.c | 40 +++++++++------------
> drivers/net/ethernet/intel/ice/ice_sriov.h | 4 +--
> drivers/net/ethernet/intel/ice/ice_vf_lib.h | 2 +-
> 4 files changed, 21 insertions(+), 27 deletions(-)
>
> diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
> index a6dd336d2500..d04498c2fd6d 100644
> --- a/drivers/net/ethernet/intel/ice/ice_main.c
> +++ b/drivers/net/ethernet/intel/ice/ice_main.c
> @@ -5561,7 +5561,7 @@ static void ice_pci_err_resume(struct pci_dev *pdev)
> return;
> }
>
> - ice_restore_all_vfs_msi_state(pdev);
> + ice_restore_all_vfs_msi_state(pf);
>
> ice_do_reset(pf, ICE_RESET_PFR);
> ice_service_task_restart(pf);
> diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
> index 31314e7540f8..48bc8ea55265 100644
> --- a/drivers/net/ethernet/intel/ice/ice_sriov.c
> +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
> @@ -789,14 +789,19 @@ static const struct ice_vf_ops ice_sriov_vf_ops = {
> */
> static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs)
> {
> + struct pci_dev *pdev = pf->pdev;
> struct ice_vfs *vfs = &pf->vfs;
> + struct pci_dev *vfdev = NULL;
> struct ice_vf *vf;
> - u16 vf_id;
> - int err;
> + u16 vf_pdev_id;
> + int err, pos;
>
> lockdep_assert_held(&vfs->table_lock);
>
> - for (vf_id = 0; vf_id < num_vfs; vf_id++) {
> + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
> + pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_pdev_id);
> +
> + for (u16 vf_id = 0; vf_id < num_vfs; vf_id++) {
> vf = kzalloc(sizeof(*vf), GFP_KERNEL);
> if (!vf) {
> err = -ENOMEM;
> @@ -812,6 +817,10 @@ static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs)
>
> ice_initialize_vf_entry(vf);
>
> + do {
> + vfdev = pci_get_device(pdev->vendor, vf_pdev_id, vfdev);
> + } while (vfdev && vfdev->physfn != pdev);
> + vf->vfdev = vfdev;
> vf->vf_sw_id = pf->first_sw;
>
> hash_add_rcu(vfs->table, &vf->entry, vf_id);
> @@ -1714,26 +1723,11 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf)
> * Called when recovering from a PF FLR to restore interrupt capability to
> * the VFs.
> */
> -void ice_restore_all_vfs_msi_state(struct pci_dev *pdev)
> +void ice_restore_all_vfs_msi_state(struct ice_pf *pf)
> {
> - u16 vf_id;
> - int pos;
> -
> - if (!pci_num_vf(pdev))
> - return;
> + struct ice_vf *vf;
> + u32 bkt;
>
> - pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
> - if (pos) {
> - struct pci_dev *vfdev;
> -
> - pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID,
> - &vf_id);
> - vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
> - while (vfdev) {
> - if (vfdev->is_virtfn && vfdev->physfn == pdev)
> - pci_restore_msi_state(vfdev);
> - vfdev = pci_get_device(pdev->vendor, vf_id,
> - vfdev);
> - }
> - }
> + ice_for_each_vf(pf, bkt, vf)
> + pci_restore_msi_state(vf->vfdev);
> }
> diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.h b/drivers/net/ethernet/intel/ice/ice_sriov.h
> index 346cb2666f3a..06829443d540 100644
> --- a/drivers/net/ethernet/intel/ice/ice_sriov.h
> +++ b/drivers/net/ethernet/intel/ice/ice_sriov.h
> @@ -33,7 +33,7 @@ int
> ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi);
>
> void ice_free_vfs(struct ice_pf *pf);
> -void ice_restore_all_vfs_msi_state(struct pci_dev *pdev);
> +void ice_restore_all_vfs_msi_state(struct ice_pf *pf);
>
> int
> ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
> @@ -67,7 +67,7 @@ static inline
> void ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) { }
> static inline void ice_print_vfs_mdd_events(struct ice_pf *pf) { }
> static inline void ice_print_vf_rx_mdd_event(struct ice_vf *vf) { }
> -static inline void ice_restore_all_vfs_msi_state(struct pci_dev *pdev) { }
> +static inline void ice_restore_all_vfs_msi_state(struct ice_pf *pf) { }
>
> static inline int
> ice_sriov_configure(struct pci_dev __always_unused *pdev,
> diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
> index 48fea6fa0362..57c36e4ccf91 100644
> --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h
> +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
> @@ -82,7 +82,7 @@ struct ice_vf {
> struct rcu_head rcu;
> struct kref refcnt;
> struct ice_pf *pf;
> -
> + struct pci_dev *vfdev;
> /* Used during virtchnl message handling and NDO ops against the VF
> * that will trigger a VFR
> */
>
> base-commit: 0ad204c4acb8ba1ed99564b001609e62547bc79d
> --
> 2.40.1
>
>
Powered by blists - more mailing lists