Message-ID: <CAG-FcCM5-c3vCRWenUoxdswfKAfaVNkBEpzkvKRhy6UfSzbu0Q@mail.gmail.com>
Date: Thu, 9 May 2024 17:18:13 -0700
From: Ziwei Xiao <ziweixiao@...gle.com>
To: David Wei <dw@...idwei.uk>
Cc: netdev@...r.kernel.org, jeroendb@...gle.com, pkaligineedi@...gle.com, 
	shailend@...gle.com, davem@...emloft.net, edumazet@...gle.com, 
	kuba@...nel.org, pabeni@...hat.com, willemb@...gle.com, 
	hramamurthy@...gle.com, rushilg@...gle.com, jfraker@...gle.com, 
	linux-kernel@...r.kernel.org
Subject: Re: [PATCH net-next 3/5] gve: Add flow steering device option

On Tue, May 7, 2024 at 10:33 PM David Wei <dw@...idwei.uk> wrote:
>
> On 2024-05-07 15:59, Ziwei Xiao wrote:
> > From: Jeroen de Borst <jeroendb@...gle.com>
> >
> > Add a new device option to signal to the driver that the device supports
> > flow steering. This device option also carries the maximum number of
> > flow steering rules that the device can store.
> >
> > Signed-off-by: Jeroen de Borst <jeroendb@...gle.com>
> > Co-developed-by: Ziwei Xiao <ziweixiao@...gle.com>
> > Signed-off-by: Ziwei Xiao <ziweixiao@...gle.com>
> > Reviewed-by: Praveen Kaligineedi <pkaligineedi@...gle.com>
> > Reviewed-by: Harshitha Ramamurthy <hramamurthy@...gle.com>
> > Reviewed-by: Willem de Bruijn <willemb@...gle.com>
> > ---
> >  drivers/net/ethernet/google/gve/gve.h        |  2 +
> >  drivers/net/ethernet/google/gve/gve_adminq.c | 42 ++++++++++++++++++--
> >  drivers/net/ethernet/google/gve/gve_adminq.h | 11 +++++
> >  3 files changed, 51 insertions(+), 4 deletions(-)
>
> I think something went wrong here. The title is different, but the patch
> is the same as 2/5.
This is the patch for adding the device option (3/5), while the
previous patch you commented on is actually for adding the extended
adminq (2/5). I don't see anything wrong with these two patches. Maybe
the reply ended up in the wrong thread?

>
> >
> > diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
> > index ca7fce17f2c0..58213c15e084 100644
> > --- a/drivers/net/ethernet/google/gve/gve.h
> > +++ b/drivers/net/ethernet/google/gve/gve.h
> > @@ -786,6 +786,8 @@ struct gve_priv {
> >
> >       u16 header_buf_size; /* device configured, header-split supported if non-zero */
> >       bool header_split_enabled; /* True if the header split is enabled by the user */
> > +
> > +     u32 max_flow_rules;
> >  };
> >
> >  enum gve_service_task_flags_bit {
> > diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
> > index 514641b3ccc7..85d0d742ad21 100644
> > --- a/drivers/net/ethernet/google/gve/gve_adminq.c
> > +++ b/drivers/net/ethernet/google/gve/gve_adminq.c
> > @@ -44,6 +44,7 @@ void gve_parse_device_option(struct gve_priv *priv,
> >                            struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
> >                            struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
> >                            struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
> > +                          struct gve_device_option_flow_steering **dev_op_flow_steering,
> >                            struct gve_device_option_modify_ring **dev_op_modify_ring)
> >  {
> >       u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
> > @@ -189,6 +190,23 @@ void gve_parse_device_option(struct gve_priv *priv,
> >               if (option_length == GVE_DEVICE_OPTION_NO_MIN_RING_SIZE)
> >                       priv->default_min_ring_size = true;
> >               break;
> > +     case GVE_DEV_OPT_ID_FLOW_STEERING:
> > +             if (option_length < sizeof(**dev_op_flow_steering) ||
> > +                 req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING) {
> > +                     dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
> > +                              "Flow Steering",
> > +                              (int)sizeof(**dev_op_flow_steering),
> > +                              GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING,
> > +                              option_length, req_feat_mask);
> > +                     break;
> > +             }
> > +
> > +             if (option_length > sizeof(**dev_op_flow_steering))
> > +                     dev_warn(&priv->pdev->dev,
> > +                              GVE_DEVICE_OPTION_TOO_BIG_FMT,
> > +                              "Flow Steering");
> > +             *dev_op_flow_steering = (void *)(option + 1);
> > +             break;
> >       default:
> >               /* If we don't recognize the option just continue
> >                * without doing anything.
> > @@ -208,6 +226,7 @@ gve_process_device_options(struct gve_priv *priv,
> >                          struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
> >                          struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
> >                          struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
> > +                        struct gve_device_option_flow_steering **dev_op_flow_steering,
> >                          struct gve_device_option_modify_ring **dev_op_modify_ring)
> >  {
> >       const int num_options = be16_to_cpu(descriptor->num_device_options);
> > @@ -230,7 +249,7 @@ gve_process_device_options(struct gve_priv *priv,
> >                                       dev_op_gqi_rda, dev_op_gqi_qpl,
> >                                       dev_op_dqo_rda, dev_op_jumbo_frames,
> >                                       dev_op_dqo_qpl, dev_op_buffer_sizes,
> > -                                     dev_op_modify_ring);
> > +                                     dev_op_flow_steering, dev_op_modify_ring);
> >               dev_opt = next_opt;
> >       }
> >
> > @@ -838,6 +857,8 @@ static void gve_enable_supported_features(struct gve_priv *priv,
> >                                         *dev_op_dqo_qpl,
> >                                         const struct gve_device_option_buffer_sizes
> >                                         *dev_op_buffer_sizes,
> > +                                       const struct gve_device_option_flow_steering
> > +                                       *dev_op_flow_steering,
> >                                         const struct gve_device_option_modify_ring
> >                                         *dev_op_modify_ring)
> >  {
> > @@ -890,10 +911,22 @@ static void gve_enable_supported_features(struct gve_priv *priv,
> >                       priv->min_tx_desc_cnt = be16_to_cpu(dev_op_modify_ring->min_tx_ring_size);
> >               }
> >       }
> > +
> > +     if (dev_op_flow_steering &&
> > +         (supported_features_mask & GVE_SUP_FLOW_STEERING_MASK)) {
> > +             if (dev_op_flow_steering->max_flow_rules) {
> > +                     priv->max_flow_rules =
> > +                             be32_to_cpu(dev_op_flow_steering->max_flow_rules);
> > +                     dev_info(&priv->pdev->dev,
> > +                              "FLOW STEERING device option enabled with max rule limit of %u.\n",
> > +                              priv->max_flow_rules);
> > +             }
> > +     }
> >  }
> >
> >  int gve_adminq_describe_device(struct gve_priv *priv)
> >  {
> > +     struct gve_device_option_flow_steering *dev_op_flow_steering = NULL;
> >       struct gve_device_option_buffer_sizes *dev_op_buffer_sizes = NULL;
> >       struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;
> >       struct gve_device_option_modify_ring *dev_op_modify_ring = NULL;
> > @@ -930,6 +963,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
> >                                        &dev_op_gqi_qpl, &dev_op_dqo_rda,
> >                                        &dev_op_jumbo_frames, &dev_op_dqo_qpl,
> >                                        &dev_op_buffer_sizes,
> > +                                      &dev_op_flow_steering,
> >                                        &dev_op_modify_ring);
> >       if (err)
> >               goto free_device_descriptor;
> > @@ -969,9 +1003,8 @@ int gve_adminq_describe_device(struct gve_priv *priv)
> >       /* set default descriptor counts */
> >       gve_set_default_desc_cnt(priv, descriptor);
> >
> > -     /* DQO supports LRO. */
> >       if (!gve_is_gqi(priv))
> > -             priv->dev->hw_features |= NETIF_F_LRO;
> > +             priv->dev->hw_features |= NETIF_F_LRO | NETIF_F_NTUPLE;
> >
> >       priv->max_registered_pages =
> >                               be64_to_cpu(descriptor->max_registered_pages);
> > @@ -991,7 +1024,8 @@ int gve_adminq_describe_device(struct gve_priv *priv)
> >
> >       gve_enable_supported_features(priv, supported_features_mask,
> >                                     dev_op_jumbo_frames, dev_op_dqo_qpl,
> > -                                   dev_op_buffer_sizes, dev_op_modify_ring);
> > +                                   dev_op_buffer_sizes, dev_op_flow_steering,
> > +                                   dev_op_modify_ring);
> >
> >  free_device_descriptor:
> >       dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
> > diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
> > index e0370ace8397..e64a0e72e781 100644
> > --- a/drivers/net/ethernet/google/gve/gve_adminq.h
> > +++ b/drivers/net/ethernet/google/gve/gve_adminq.h
> > @@ -146,6 +146,14 @@ struct gve_device_option_modify_ring {
> >
> >  static_assert(sizeof(struct gve_device_option_modify_ring) == 12);
> >
> > +struct gve_device_option_flow_steering {
> > +     __be32 supported_features_mask;
> > +     __be32 reserved;
> > +     __be32 max_flow_rules;
> > +};
> > +
> > +static_assert(sizeof(struct gve_device_option_flow_steering) == 12);
> > +
> >  /* Terminology:
> >   *
> >   * RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA
> > @@ -163,6 +171,7 @@ enum gve_dev_opt_id {
> >       GVE_DEV_OPT_ID_DQO_QPL                  = 0x7,
> >       GVE_DEV_OPT_ID_JUMBO_FRAMES             = 0x8,
> >       GVE_DEV_OPT_ID_BUFFER_SIZES             = 0xa,
> > +     GVE_DEV_OPT_ID_FLOW_STEERING            = 0xb,
> >  };
> >
> >  enum gve_dev_opt_req_feat_mask {
> > @@ -174,12 +183,14 @@ enum gve_dev_opt_req_feat_mask {
> >       GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL               = 0x0,
> >       GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES          = 0x0,
> >       GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING           = 0x0,
> > +     GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING         = 0x0,
> >  };
> >
> >  enum gve_sup_feature_mask {
> >       GVE_SUP_MODIFY_RING_MASK        = 1 << 0,
> >       GVE_SUP_JUMBO_FRAMES_MASK       = 1 << 2,
> >       GVE_SUP_BUFFER_SIZES_MASK       = 1 << 4,
> > +     GVE_SUP_FLOW_STEERING_MASK      = 1 << 5,
> >  };
> >
> >  #define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0
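
For anyone reading this patch without the rest of the series: below is a
minimal, self-contained userspace sketch of the same parse-and-decode
pattern the hunks above add to gve_parse_device_option() and
gve_enable_supported_features(). The struct, function, and value names in
the sketch are made up for illustration; only the 12-byte option layout,
the length/required-features check, and the big-endian max_flow_rules
field come from the patch itself.

/*
 * Standalone sketch (not part of the patch): mirrors the flow steering
 * device option layout from gve_adminq.h and the length/feature-mask
 * check done in gve_parse_device_option(). All names are local to this
 * example.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* ntohl()/htonl() for the __be32 wire fields */

struct flow_steering_opt {
	uint32_t supported_features_mask;	/* __be32 on the wire */
	uint32_t reserved;			/* __be32 on the wire */
	uint32_t max_flow_rules;		/* __be32 on the wire */
};

static_assert(sizeof(struct flow_steering_opt) == 12,
	      "option layout must stay 12 bytes");

/*
 * Accept the option only if it is at least as long as expected and the
 * device does not require features we do not implement (the driver
 * expects a required-features mask of 0 for this option), then convert
 * the big-endian rule limit to host byte order.
 */
static int parse_flow_steering(const struct flow_steering_opt *opt,
			       uint16_t option_length,
			       uint32_t req_feat_mask,
			       uint32_t *max_flow_rules)
{
	if (option_length < sizeof(*opt) || req_feat_mask != 0)
		return -1;
	*max_flow_rules = ntohl(opt->max_flow_rules);
	return 0;
}

int main(void)
{
	/* Pretend the device advertised up to 256 rules (big-endian). */
	struct flow_steering_opt opt = {
		.max_flow_rules = htonl(256),
	};
	uint32_t max_rules;

	if (!parse_flow_steering(&opt, sizeof(opt), 0, &max_rules))
		printf("flow steering supported, max %u rules\n", max_rules);
	return 0;
}

Compiling with a C11 compiler (e.g. cc -std=c11) and running it prints the
decoded rule limit, analogous to the dev_info() message the driver logs
when the option is accepted.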
