Message-ID: <CAHS8izO7agxQ6nbc=BoK5KuYd_jgVLgJTbZbmEUqarfVn300Tw@mail.gmail.com>
Date: Fri, 10 May 2024 09:22:05 -0700
From: Mina Almasry <almasrymina@...gle.com>
To: Alexander Lobakin <aleksander.lobakin@...el.com>
Cc: intel-wired-lan@...ts.osuosl.org, Tony Nguyen <anthony.l.nguyen@...el.com>,
"David S. Miller" <davem@...emloft.net>, Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>, Paolo Abeni <pabeni@...hat.com>,
nex.sw.ncis.osdt.itp.upstreaming@...el.com, netdev@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH RFC iwl-next 08/12] idpf: reuse libeth's definitions of
 parsed ptype structures

On Fri, May 10, 2024 at 8:30 AM Alexander Lobakin
<aleksander.lobakin@...el.com> wrote:
>
> idpf's in-kernel parsed-ptype structure is almost identical to the one
> used in the previous Intel drivers, which means it can be converted to
> use libeth's definitions and even helpers. The only difference is that,
> unlike libie, it doesn't use a constant table, but rather one obtained
> from the device.
> Remove the driver counterpart and use libeth's helpers for hashes and
> checksums. This slightly optimizes skb field processing thanks to
> faster checks. Also, don't define a big static array of ptypes in
> &idpf_vport -- allocate it dynamically. The pointer to it is cached in
> &idpf_rx_queue anyway.
>
> Signed-off-by: Alexander Lobakin <aleksander.lobakin@...el.com>
> ---
>  drivers/net/ethernet/intel/idpf/Kconfig      |   1 +
>  drivers/net/ethernet/intel/idpf/idpf.h       |   2 +-
>  drivers/net/ethernet/intel/idpf/idpf_txrx.h  |  88 +-----------
>  drivers/net/ethernet/intel/idpf/idpf_lib.c   |   3 +
>  drivers/net/ethernet/intel/idpf/idpf_main.c  |   1 +
>  .../ethernet/intel/idpf/idpf_singleq_txrx.c  | 113 +++++++---------
>  drivers/net/ethernet/intel/idpf/idpf_txrx.c  | 125 +++++++-----------
>  .../net/ethernet/intel/idpf/idpf_virtchnl.c  |  69 ++++++----
>  8 files changed, 151 insertions(+), 251 deletions(-)
>
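
As an aside for anyone skimming the conversion: after this patch the
function boils down to "allocate, fill, finalize, publish". A condensed
sketch of that shape -- example_build_ptype_table() is a made-up name
and the virtchnl2 parsing is elided; the real logic is in the hunks
below:

static int example_build_ptype_table(struct idpf_vport *vport, int max_ptype)
{
	struct libeth_rx_pt *ptype_lkup __free(kfree) = NULL;
	int k;

	/* the table is built once per vport and then cached */
	if (vport->rx_ptype_lkup)
		return 0;

	/* dynamic allocation replaces the big static array in idpf_vport */
	ptype_lkup = kcalloc(max_ptype, sizeof(*ptype_lkup), GFP_KERNEL);
	if (!ptype_lkup)
		return -ENOMEM;

	for (k = 0; k < max_ptype; k++) {
		/* ... map virtchnl2 proto IDs to outer_ip / tunnel_type /
		 * inner_prot / payload_layer as in the hunks below ...
		 */
		idpf_finalize_ptype_lookup(&ptype_lkup[k]);
	}

	/* transfer ownership, otherwise __free(kfree) frees it on return */
	vport->rx_ptype_lkup = no_free_ptr(ptype_lkup);

	return 0;
}
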
..
>  * idpf_send_get_rx_ptype_msg - Send virtchnl for ptype info
>  * @vport: virtual port data structure
> @@ -2526,7 +2541,7 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
>  {
>  	struct virtchnl2_get_ptype_info *get_ptype_info __free(kfree) = NULL;
>  	struct virtchnl2_get_ptype_info *ptype_info __free(kfree) = NULL;
> -	struct idpf_rx_ptype_decoded *ptype_lkup = vport->rx_ptype_lkup;
> +	struct libeth_rx_pt *ptype_lkup __free(kfree) = NULL;
>  	int max_ptype, ptypes_recvd = 0, ptype_offset;
>  	struct idpf_adapter *adapter = vport->adapter;
>  	struct idpf_vc_xn_params xn_params = {};
> @@ -2534,12 +2549,17 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
>  	ssize_t reply_sz;
>  	int i, j, k;
> 
> +	if (vport->rx_ptype_lkup)
> +		return 0;
> +
>  	if (idpf_is_queue_model_split(vport->rxq_model))
>  		max_ptype = IDPF_RX_MAX_PTYPE;
>  	else
>  		max_ptype = IDPF_RX_MAX_BASE_PTYPE;
> 
> -	memset(vport->rx_ptype_lkup, 0, sizeof(vport->rx_ptype_lkup));
> +	ptype_lkup = kcalloc(max_ptype, sizeof(*ptype_lkup), GFP_KERNEL);
> +	if (!ptype_lkup)
> +		return -ENOMEM;
> 
>  	get_ptype_info = kzalloc(sizeof(*get_ptype_info), GFP_KERNEL);
>  	if (!get_ptype_info)
> @@ -2604,9 +2624,6 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
>  			else
>  				k = ptype->ptype_id_8;
> 
> -			if (ptype->proto_id_count)
> -				ptype_lkup[k].known = 1;
> -
>  			for (j = 0; j < ptype->proto_id_count; j++) {
>  				id = le16_to_cpu(ptype->proto_id[j]);
>  				switch (id) {
> @@ -2614,18 +2631,18 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
>  					if (pstate.tunnel_state ==
>  					    IDPF_PTYPE_TUNNEL_IP) {
>  						ptype_lkup[k].tunnel_type =
> -						IDPF_RX_PTYPE_TUNNEL_IP_GRENAT;
> +						LIBETH_RX_PT_TUNNEL_IP_GRENAT;
>  						pstate.tunnel_state |=
>  							IDPF_PTYPE_TUNNEL_IP_GRENAT;
>  					}
>  					break;
>  				case VIRTCHNL2_PROTO_HDR_MAC:
>  					ptype_lkup[k].outer_ip =
> -						IDPF_RX_PTYPE_OUTER_L2;
> +						LIBETH_RX_PT_OUTER_L2;
>  					if (pstate.tunnel_state ==
>  					    IDPF_TUN_IP_GRE) {
>  						ptype_lkup[k].tunnel_type =
> -						IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC;
> +						LIBETH_RX_PT_TUNNEL_IP_GRENAT_MAC;
>  						pstate.tunnel_state |=
>  							IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC;
>  					}
> @@ -2652,23 +2669,23 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
>  					break;
>  				case VIRTCHNL2_PROTO_HDR_UDP:
>  					ptype_lkup[k].inner_prot =
> -						IDPF_RX_PTYPE_INNER_PROT_UDP;
> +						LIBETH_RX_PT_INNER_UDP;
>  					break;
>  				case VIRTCHNL2_PROTO_HDR_TCP:
>  					ptype_lkup[k].inner_prot =
> -						IDPF_RX_PTYPE_INNER_PROT_TCP;
> +						LIBETH_RX_PT_INNER_TCP;
>  					break;
>  				case VIRTCHNL2_PROTO_HDR_SCTP:
>  					ptype_lkup[k].inner_prot =
> -						IDPF_RX_PTYPE_INNER_PROT_SCTP;
> +						LIBETH_RX_PT_INNER_SCTP;
>  					break;
>  				case VIRTCHNL2_PROTO_HDR_ICMP:
>  					ptype_lkup[k].inner_prot =
> -						IDPF_RX_PTYPE_INNER_PROT_ICMP;
> +						LIBETH_RX_PT_INNER_ICMP;
>  					break;
>  				case VIRTCHNL2_PROTO_HDR_PAY:
>  					ptype_lkup[k].payload_layer =
> -						IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2;
> +						LIBETH_RX_PT_PAYLOAD_L2;
>  					break;
>  				case VIRTCHNL2_PROTO_HDR_ICMPV6:
>  				case VIRTCHNL2_PROTO_HDR_IPV6_EH:
> @@ -2722,9 +2739,13 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
>  					break;
>  				}
>  			}
> +
> +			idpf_finalize_ptype_lookup(&ptype_lkup[k]);
>  		}
>  	}
> 
> +	vport->rx_ptype_lkup = no_free_ptr(ptype_lkup);
> +

Hi Olek,

I think you also need to patch up the early return from
idpf_send_get_rx_ptype_msg(); otherwise vport->rx_ptype_lkup is never
set and I run into a crash later on. Something like:

diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index a0aaa849df24..80d9c09ff407 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -2629,7 +2629,7 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
 			/* 0xFFFF indicates end of ptypes */
 			if (le16_to_cpu(ptype->ptype_id_10) ==
 							IDPF_INVALID_PTYPE_ID)
-				return 0;
+				goto done;
 
 			if (idpf_is_queue_model_split(vport->rxq_model))
 				k = le16_to_cpu(ptype->ptype_id_10);
@@ -2756,6 +2756,7 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
 		}
 	}
 
+done:
 	vport->rx_ptype_lkup = no_free_ptr(ptype_lkup);
 
 	return 0;
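
To spell out why the early return is a problem: ptype_lkup is declared
with __free(kfree), so it is freed automatically on any return path
that doesn't hand the pointer off via no_free_ptr(). Hitting the 0xFFFF
end marker therefore frees the freshly built table and leaves
vport->rx_ptype_lkup NULL. A standalone userspace sketch of that
pattern, if it helps -- freep(), __free_auto and no_free_ptr() below
are stand-ins for the linux/cleanup.h machinery, not the kernel code
itself:

#include <stdio.h>
#include <stdlib.h>

/* cleanup handler: called with a pointer to the annotated variable */
static void freep(void *p)
{
	free(*(void **)p);
}
#define __free_auto __attribute__((cleanup(freep)))

/* steal the pointer so the cleanup handler frees nothing */
static void *no_free_ptr(void **p)
{
	void *ret = *p;

	*p = NULL;
	return ret;
}

static void *table;	/* stands in for vport->rx_ptype_lkup */

static int build(int hit_end_marker)
{
	void *lkup __free_auto = calloc(16, sizeof(int));

	if (!lkup)
		return -1;

	if (hit_end_marker)
		return 0;	/* BUG: lkup freed here, table never set */

	table = no_free_ptr(&lkup);	/* ownership moved, not freed */
	return 0;
}

int main(void)
{
	build(1);
	printf("early return: table = %p\n", table);	/* (nil) */
	build(0);
	printf("normal path:  table = %p\n", table);	/* non-NULL */
	free(table);
	return 0;
}

With the goto, both exit paths funnel through the no_free_ptr()
assignment, so the table survives and the lookup pointer is always set.
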
--
Thanks,
Mina