Message-ID: <PH7PR21MB3116E1C8D5F32651FC13B5D2CA809@PH7PR21MB3116.namprd21.prod.outlook.com>
Date: Mon, 20 Mar 2023 15:21:45 +0000
From: Haiyang Zhang <haiyangz@...rosoft.com>
To: Yunsheng Lin <linyunsheng@...wei.com>,
"linux-hyperv@...r.kernel.org" <linux-hyperv@...r.kernel.org>,
"netdev@...r.kernel.org" <netdev@...r.kernel.org>
CC: Dexuan Cui <decui@...rosoft.com>,
KY Srinivasan <kys@...rosoft.com>,
Paul Rosswurm <paulros@...rosoft.com>,
"olaf@...fle.de" <olaf@...fle.de>,
"vkuznets@...hat.com" <vkuznets@...hat.com>,
"davem@...emloft.net" <davem@...emloft.net>,
"wei.liu@...nel.org" <wei.liu@...nel.org>,
"edumazet@...gle.com" <edumazet@...gle.com>,
"kuba@...nel.org" <kuba@...nel.org>,
"pabeni@...hat.com" <pabeni@...hat.com>,
"leon@...nel.org" <leon@...nel.org>,
Long Li <longli@...rosoft.com>,
"ssengar@...ux.microsoft.com" <ssengar@...ux.microsoft.com>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>
Subject: RE: [PATCH net-next] net: mana: Add support for jumbo frame
> -----Original Message-----
> From: Yunsheng Lin <linyunsheng@...wei.com>
> Sent: Monday, March 20, 2023 5:42 AM
> To: Haiyang Zhang <haiyangz@...rosoft.com>; linux-hyperv@...r.kernel.org;
> netdev@...r.kernel.org
> Cc: Dexuan Cui <decui@...rosoft.com>; KY Srinivasan <kys@...rosoft.com>;
> Paul Rosswurm <paulros@...rosoft.com>; olaf@...fle.de;
> vkuznets@...hat.com; davem@...emloft.net; wei.liu@...nel.org;
> edumazet@...gle.com; kuba@...nel.org; pabeni@...hat.com;
> leon@...nel.org; Long Li <longli@...rosoft.com>;
> ssengar@...ux.microsoft.com; linux-kernel@...r.kernel.org
> Subject: Re: [PATCH net-next] net: mana: Add support for jumbo frame
>
> On 2023/3/20 5:27, Haiyang Zhang wrote:
> > During probe, get the hardware allowed max MTU by querying the device
> > configuration. Users can select MTU up to the device limit. Also,
> > when XDP is in use, we currently limit the buffer size to one page.
> >
> > Updated RX data path to allocate and use RX queue DMA buffers with
> > proper size based on the MTU setting.
>
> The change in this patch seems better split into a more reviewable
> patchset. Perhaps refactor the RX queue DMA buffer allocation to handle
> different sizes first, then add support for the jumbo frame.
Will consider.
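For reference, the buffer sizing the commit message describes works roughly as
follows. This is a simplified sketch, not the driver code itself:
EXAMPLE_XDP_MTU_MAX and example_rx_buf_size() are illustrative names, while
XDP_PACKET_HEADROOM, ETH_HLEN, SKB_DATA_ALIGN and skb_shared_info are the
usual kernel definitions.

/* Simplified sketch of the RX buffer sizing (illustrative names):
 * the DMA buffer must hold the XDP headroom (when a program is
 * attached), the Ethernet header, MTU bytes of payload, and the
 * skb_shared_info tail that build_skb() expects at the end.
 */
#include <linux/types.h>        /* bool */
#include <linux/bpf.h>          /* XDP_PACKET_HEADROOM */
#include <linux/if_ether.h>     /* ETH_HLEN */
#include <linux/skbuff.h>       /* SKB_DATA_ALIGN, skb_shared_info */

/* Illustrative counterpart of the patch's MANA_XDP_MTU_MAX: the largest
 * MTU that still lets one page hold headroom + frame + shared_info.
 */
#define EXAMPLE_XDP_MTU_MAX \
	(PAGE_SIZE - XDP_PACKET_HEADROOM - ETH_HLEN - \
	 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

/* Hypothetical helper: DMA buffer size needed for a given MTU. */
static unsigned int example_rx_buf_size(unsigned int mtu, bool xdp_attached)
{
	unsigned int headroom = xdp_attached ? XDP_PACKET_HEADROOM : 0;

	return SKB_DATA_ALIGN(headroom + ETH_HLEN + mtu) +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

With an XDP program attached the whole buffer must still fit in one page, which
is where the MANA_XDP_MTU_MAX check in the patch comes from; without XDP the
buffer can grow with the MTU up to the device limit.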
>
> >
> > Signed-off-by: Haiyang Zhang <haiyangz@...rosoft.com>
> > ---
> > .../net/ethernet/microsoft/mana/mana_bpf.c | 22 +-
> >  drivers/net/ethernet/microsoft/mana/mana_en.c | 229 ++++++++++++------
> > include/net/mana/gdma.h | 4 +
> > include/net/mana/mana.h | 18 +-
> > 4 files changed, 183 insertions(+), 90 deletions(-)
> >
> > diff --git a/drivers/net/ethernet/microsoft/mana/mana_bpf.c b/drivers/net/ethernet/microsoft/mana/mana_bpf.c
> > index 3caea631229c..23b1521c0df9 100644
> > --- a/drivers/net/ethernet/microsoft/mana/mana_bpf.c
> > +++ b/drivers/net/ethernet/microsoft/mana/mana_bpf.c
> > @@ -133,12 +133,6 @@ u32 mana_run_xdp(struct net_device *ndev,
> struct mana_rxq *rxq,
> > return act;
> > }
> >
> > -static unsigned int mana_xdp_fraglen(unsigned int len)
> > -{
> > - return SKB_DATA_ALIGN(len) +
> > - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
> > -}
> > -
> > struct bpf_prog *mana_xdp_get(struct mana_port_context *apc)
> > {
> > ASSERT_RTNL();
> > @@ -179,17 +173,18 @@ static int mana_xdp_set(struct net_device *ndev,
> struct bpf_prog *prog,
> > {
> > struct mana_port_context *apc = netdev_priv(ndev);
> > struct bpf_prog *old_prog;
> > - int buf_max;
> > + struct gdma_context *gc;
> > +
> > + gc = apc->ac->gdma_dev->gdma_context;
> >
> > old_prog = mana_xdp_get(apc);
> >
> > if (!old_prog && !prog)
> > return 0;
> >
> > -	buf_max = XDP_PACKET_HEADROOM + mana_xdp_fraglen(ndev->mtu + ETH_HLEN);
> > - if (prog && buf_max > PAGE_SIZE) {
> > - netdev_err(ndev, "XDP: mtu:%u too large, buf_max:%u\n",
> > - ndev->mtu, buf_max);
> > + if (prog && ndev->mtu > MANA_XDP_MTU_MAX) {
> > + netdev_err(ndev, "XDP: mtu:%u too large, mtu_max:%lu\n",
> > + ndev->mtu, MANA_XDP_MTU_MAX);
> > NL_SET_ERR_MSG_MOD(extack, "XDP: mtu too large");
> >
> > return -EOPNOTSUPP;
> > @@ -206,6 +201,11 @@ static int mana_xdp_set(struct net_device *ndev,
> struct bpf_prog *prog,
> > if (apc->port_is_up)
> > mana_chn_setxdp(apc, prog);
> >
> > + if (prog)
> > + ndev->max_mtu = MANA_XDP_MTU_MAX;
> > + else
> > + ndev->max_mtu = gc->adapter_mtu - ETH_HLEN;
> > +
> > return 0;
> > }
> >
> > diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
> > index 492474b4d8aa..07738b7e85f2 100644
> > --- a/drivers/net/ethernet/microsoft/mana/mana_en.c
> > +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
> > @@ -427,6 +427,34 @@ static u16 mana_select_queue(struct net_device
> *ndev, struct sk_buff *skb,
> > return txq;
> > }
> >
> > +static int mana_change_mtu(struct net_device *ndev, int new_mtu)
> > +{
> > + unsigned int old_mtu = ndev->mtu;
> > + int err, err2;
> > +
> > + err = mana_detach(ndev, false);
> > + if (err) {
> > + netdev_err(ndev, "mana_detach failed: %d\n", err);
> > + return err;
> > + }
> > +
> > + ndev->mtu = new_mtu;
> > +
> > + err = mana_attach(ndev);
> > + if (!err)
> > + return 0;
> > +
> > + netdev_err(ndev, "mana_attach failed: %d\n", err);
> > +
> > + /* Try to roll it back to the old configuration. */
> > + ndev->mtu = old_mtu;
> > + err2 = mana_attach(ndev);
> > + if (err2)
> > + netdev_err(ndev, "mana re-attach failed: %d\n", err2);
> > +
> > + return err;
> > +}
> > +
> > static const struct net_device_ops mana_devops = {
> > .ndo_open = mana_open,
> > .ndo_stop = mana_close,
> > @@ -436,6 +464,7 @@ static const struct net_device_ops mana_devops = {
> > .ndo_get_stats64 = mana_get_stats64,
> > .ndo_bpf = mana_bpf,
> > .ndo_xdp_xmit = mana_xdp_xmit,
> > + .ndo_change_mtu = mana_change_mtu,
> > };
> >
> > static void mana_cleanup_port_context(struct mana_port_context *apc)
> > @@ -625,6 +654,9 @@ static int mana_query_device_cfg(struct
> mana_context *ac, u32 proto_major_ver,
> >
> > mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
> > sizeof(req), sizeof(resp));
> > +
> > + req.hdr.resp.msg_version = GDMA_MESSAGE_V2;
>
> hdr->req.msg_version and hdr->resp.msg_version are both set to
> GDMA_MESSAGE_V1 in mana_gd_init_req_hdr(). Is there any reason
> why hdr->req.msg_version is not set to GDMA_MESSAGE_V2?
The request version is still V1 in our hardware.
> Would initializing req.hdr.resp.msg_version to GDMA_MESSAGE_V2
> in mana_gd_init_req_hdr(), instead of setting it to GDMA_MESSAGE_V2
> in mana_query_device_cfg(), affect other users?
Yes, other users still need V1, so only this message's response version is set
to V2.
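Put differently, only this one request opts in to the V2 response format; the
request format itself and every other message keep the V1 defaults from
mana_gd_init_req_hdr(). Condensed from the hunk above (sketch only, with the
surrounding code omitted):

/* Sketch: the request stays at V1 (what current hardware expects), but
 * resp.msg_version tells the firmware this driver can parse a V2 reply
 * for MANA_QUERY_DEV_CONFIG. Other callers of mana_gd_init_req_hdr()
 * are untouched and keep V1 on both sides.
 */
mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
		     sizeof(req), sizeof(resp));
req.hdr.resp.msg_version = GDMA_MESSAGE_V2;  /* response side only */
/* req.hdr.req.msg_version is left at GDMA_MESSAGE_V1 */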
>
>
> > +
> > req.proto_major_ver = proto_major_ver;
> > req.proto_minor_ver = proto_minor_ver;
> > req.proto_micro_ver = proto_micro_ver;
> > @@ -647,6 +679,11 @@ static int mana_query_device_cfg(struct
> mana_context *ac, u32 proto_major_ver,
> >
> > *max_num_vports = resp.max_num_vports;
> >
> > + if (resp.hdr.response.msg_version == GDMA_MESSAGE_V2)
>
> It seems the driver is setting resp.hdr.response.msg_version to
> GDMA_MESSAGE_V2 above, and does the checking here. Does older
> firmware reset resp.hdr.response.msg_version to GDMA_MESSAGE_V1
> in order to enable compatibility between firmware and driver?
Yes, older firmware still uses V1.
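So the fallback works in both directions: new firmware answers in the V2
layout and fills in adapter_mtu, while old firmware keeps answering V1 and the
driver sticks to the standard frame size. A condensed view of how that limit
then feeds the user-visible MTU range (illustrative, combining lines from
different hunks of the patch):

/* Only trust the new field when the firmware actually replied in the
 * V2 layout; otherwise fall back to classic 1514-byte frames.
 */
if (resp.hdr.response.msg_version == GDMA_MESSAGE_V2)
	gc->adapter_mtu = resp.adapter_mtu;
else
	gc->adapter_mtu = ETH_FRAME_LEN;

/* Later, the port advertises that limit to the stack (no-XDP case): */
ndev->max_mtu = gc->adapter_mtu - ETH_HLEN;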
>
> > + gc->adapter_mtu = resp.adapter_mtu;
> > + else
> > + gc->adapter_mtu = ETH_FRAME_LEN;
> > +
> > return 0;
> > }
> >
> > @@ -1185,10 +1222,10 @@ static void mana_post_pkt_rxq(struct
> mana_rxq *rxq)
> > WARN_ON_ONCE(recv_buf_oob->wqe_inf.wqe_size_in_bu != 1);
> > }
> >
> > -static struct sk_buff *mana_build_skb(void *buf_va, uint pkt_len,
> > - struct xdp_buff *xdp)
> > +static struct sk_buff *mana_build_skb(struct mana_rxq *rxq, void *buf_va,
> > + uint pkt_len, struct xdp_buff *xdp)
> > {
> > - struct sk_buff *skb = build_skb(buf_va, PAGE_SIZE);
> > + struct sk_buff *skb = napi_build_skb(buf_va, rxq->alloc_size);
>
> Changing build_skb() to napi_build_skb() seems like an optimization
> unrelated to jumbo frame support; perhaps that belongs in a separate patch?
Will do. Thanks.
- Haiyang