[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <f7tbjsxfl22.fsf@redhat.com>
Date: Tue, 15 Apr 2025 12:26:13 -0400
From: Aaron Conole <aconole@...hat.com>
To: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
Cc: netdev@...r.kernel.org, linux-rt-devel@...ts.linux.dev, "David S.
Miller" <davem@...emloft.net>, Eric Dumazet <edumazet@...gle.com>, Jakub
Kicinski <kuba@...nel.org>, Paolo Abeni <pabeni@...hat.com>, Simon
Horman <horms@...nel.org>, Thomas Gleixner <tglx@...utronix.de>, Eelco
Chaudron <echaudro@...hat.com>, Ilya Maximets <i.maximets@....org>,
dev@...nvswitch.org
Subject: Re: [PATCH net-next v2 12/18] openvswitch: Move
ovs_frag_data_storage into the struct ovs_pcpu_storage
Sebastian Andrzej Siewior <bigeasy@...utronix.de> writes:
> ovs_frag_data_storage is a per-CPU variable and relies on disabled BH for its
> locking. Without per-CPU locking in local_bh_disable() on PREEMPT_RT
> this data structure requires explicit locking.
>
> Move ovs_frag_data_storage into the struct ovs_pcpu_storage which already
> provides locking for the structure.
>
> Cc: Aaron Conole <aconole@...hat.com>
> Cc: Eelco Chaudron <echaudro@...hat.com>
> Cc: Ilya Maximets <i.maximets@....org>
> Cc: dev@...nvswitch.org
> Signed-off-by: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
> ---
I'm going to reply here, but I need to bisect a bit more (though I
suspect the results below are due to 11/18). When I tested with this
patch there were lots of "unexplained" latency spikes during processing
(note, I'm not doing PREEMPT_RT in my testing, but I guess it would
smooth the spikes out at the cost of max performance).
With the series:
[SUM] 0.00-300.00 sec 3.28 TBytes 96.1 Gbits/sec 9417 sender
[SUM] 0.00-300.00 sec 3.28 TBytes 96.1 Gbits/sec receiver
Without the series:
[SUM] 0.00-300.00 sec 3.26 TBytes 95.5 Gbits/sec 149 sender
[SUM] 0.00-300.00 sec 3.26 TBytes 95.5 Gbits/sec receiver
And while the 'final' numbers might look acceptable, one thing I'll note
is I saw multiple stalls as:
[ 5] 57.00-58.00 sec 128 KBytes 903 Kbits/sec 0 4.02 MBytes
But without the patch, I didn't see such stalls. My testing:
1. Install openvswitch userspace and ipcalc
2. start userspace.
3. Set up two netns and connect them (I have a more complicated script to
set up the flows, and I can send that to you)
4. Use iperf3 to test (-P5 -t 300)
As I wrote I suspect the locking in 11 is leading to these stalls, as
the data I'm sending shouldn't be hitting the frag path.
Do these results seem expected to you?
> net/openvswitch/actions.c | 20 ++------------------
> net/openvswitch/datapath.h | 16 ++++++++++++++++
> 2 files changed, 18 insertions(+), 18 deletions(-)
>
> diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
> index f4996c11aefac..4d20eadd77ceb 100644
> --- a/net/openvswitch/actions.c
> +++ b/net/openvswitch/actions.c
> @@ -39,22 +39,6 @@
> #include "flow_netlink.h"
> #include "openvswitch_trace.h"
>
> -#define MAX_L2_LEN (VLAN_ETH_HLEN + 3 * MPLS_HLEN)
> -struct ovs_frag_data {
> - unsigned long dst;
> - struct vport *vport;
> - struct ovs_skb_cb cb;
> - __be16 inner_protocol;
> - u16 network_offset; /* valid only for MPLS */
> - u16 vlan_tci;
> - __be16 vlan_proto;
> - unsigned int l2_len;
> - u8 mac_proto;
> - u8 l2_data[MAX_L2_LEN];
> -};
> -
> -static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);
> -
> DEFINE_PER_CPU(struct ovs_pcpu_storage, ovs_pcpu_storage) = {
> .bh_lock = INIT_LOCAL_LOCK(bh_lock),
> };
> @@ -771,7 +755,7 @@ static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
> static int ovs_vport_output(struct net *net, struct sock *sk,
> struct sk_buff *skb)
> {
> - struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
> + struct ovs_frag_data *data = this_cpu_ptr(&ovs_pcpu_storage.frag_data);
> struct vport *vport = data->vport;
>
> if (skb_cow_head(skb, data->l2_len) < 0) {
> @@ -823,7 +807,7 @@ static void prepare_frag(struct vport *vport, struct sk_buff *skb,
> unsigned int hlen = skb_network_offset(skb);
> struct ovs_frag_data *data;
>
> - data = this_cpu_ptr(&ovs_frag_data_storage);
> + data = this_cpu_ptr(&ovs_pcpu_storage.frag_data);
> data->dst = skb->_skb_refdst;
> data->vport = vport;
> data->cb = *OVS_CB(skb);
> diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
> index 4a665c3cfa906..1b5348b0f5594 100644
> --- a/net/openvswitch/datapath.h
> +++ b/net/openvswitch/datapath.h
> @@ -13,6 +13,7 @@
> #include <linux/skbuff.h>
> #include <linux/u64_stats_sync.h>
> #include <net/ip_tunnels.h>
> +#include <net/mpls.h>
>
> #include "conntrack.h"
> #include "flow.h"
> @@ -173,6 +174,20 @@ struct ovs_net {
> bool xt_label;
> };
>
> +#define MAX_L2_LEN (VLAN_ETH_HLEN + 3 * MPLS_HLEN)
> +struct ovs_frag_data {
> + unsigned long dst;
> + struct vport *vport;
> + struct ovs_skb_cb cb;
> + __be16 inner_protocol;
> + u16 network_offset; /* valid only for MPLS */
> + u16 vlan_tci;
> + __be16 vlan_proto;
> + unsigned int l2_len;
> + u8 mac_proto;
> + u8 l2_data[MAX_L2_LEN];
> +};
> +
> struct deferred_action {
> struct sk_buff *skb;
> const struct nlattr *actions;
> @@ -200,6 +215,7 @@ struct action_flow_keys {
> struct ovs_pcpu_storage {
> struct action_fifo action_fifos;
> struct action_flow_keys flow_keys;
> + struct ovs_frag_data frag_data;
> int exec_level;
> struct task_struct *owner;
> local_lock_t bh_lock;
Powered by blists - more mailing lists