Message-Id: <20230215183335.800122-21-dima@arista.com>
Date: Wed, 15 Feb 2023 18:33:34 +0000
From: Dmitry Safonov <dima@...sta.com>
To: linux-kernel@...r.kernel.org, David Ahern <dsahern@...nel.org>,
Eric Dumazet <edumazet@...gle.com>,
Paolo Abeni <pabeni@...hat.com>,
Jakub Kicinski <kuba@...nel.org>,
"David S. Miller" <davem@...emloft.net>
Cc: Dmitry Safonov <dima@...sta.com>,
Andy Lutomirski <luto@...capital.net>,
Ard Biesheuvel <ardb@...nel.org>,
Bob Gilligan <gilligan@...sta.com>,
Dan Carpenter <dan.carpenter@...cle.com>,
David Laight <David.Laight@...lab.com>,
Dmitry Safonov <0x7f454c46@...il.com>,
Eric Biggers <ebiggers@...nel.org>,
"Eric W. Biederman" <ebiederm@...ssion.com>,
Francesco Ruggeri <fruggeri05@...il.com>,
Herbert Xu <herbert@...dor.apana.org.au>,
Hideaki YOSHIFUJI <yoshfuji@...ux-ipv6.org>,
Ivan Delalande <colona@...sta.com>,
Leonard Crestez <cdleonard@...il.com>,
Salam Noureddine <noureddine@...sta.com>,
netdev@...r.kernel.org
Subject: [PATCH v4 20/21] net/tcp-ao: Add static_key for TCP-AO
Similarly to TCP-MD5, add a static key for TCP-AO that is patched out
while there are no AO keys on the machine and is dynamically enabled when
the first setsockopt(TCP_AO) adds a key on any socket. The static key is
likewise dynamically disabled later, once the socket gets destroyed.

The lifetime of the enabled static key matches the lifetime of ao_info:
it is enabled on allocation, passed over from the full socket to the twsk,
and disabled again when ao_info is scheduled for destruction.
Signed-off-by: Dmitry Safonov <dima@...sta.com>
---
include/net/tcp_ao.h | 2 ++
net/ipv4/tcp_ao.c | 17 +++++++++++++++++
net/ipv4/tcp_input.c | 42 ++++++++++++++++++++++++++++--------------
3 files changed, 47 insertions(+), 14 deletions(-)
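
A note on the mechanism, for reviewers: the jump_label pattern this patch
leans on boils down to the minimal sketch below. It only mirrors the API
calls used in the hunks that follow; the foo_* names are hypothetical and
none of this is part of the patch itself.

/* Illustrative sketch only (not part of the patch): the foo_* helpers are
 * made up, but the jump_label calls are the same ones used below for
 * tcp_ao_needed.
 */
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/jump_label_ratelimit.h>

/* Patched out by default; disabling is deferred/rate-limited by HZ. */
static DEFINE_STATIC_KEY_DEFERRED_FALSE(foo_needed, HZ);

static int foo_first_key_added(void)
{
        /* Enable the branch when the first key appears.  With the
         * overflow-safe jump_label helpers this series builds on,
         * static_branch_inc() reports failure instead of overflowing
         * the reference counter, hence the -EUSERS handling.
         */
        if (!static_branch_inc(&foo_needed.key))
                return -EUSERS;
        return 0;
}

static void foo_last_user_gone(void)
{
        /* Deferred decrement: patching the branch back out is
         * rate-limited, so destroying many sockets stays cheap.
         */
        static_branch_slow_dec_deferred(&foo_needed);
}

static bool foo_hot_path(void)
{
        /* With no keys anywhere on the machine this is a single
         * patched-out jump; per-socket state is not even touched.
         */
        if (!static_branch_unlikely(&foo_needed.key))
                return false;
        /* ... look at per-socket AO state only past this point ... */
        return true;
}

Presumably this is also why tcp_ao_copy_all_matching() below uses
static_key_fast_inc_not_disabled() directly: it runs in a context where
the sleeping, branch-patching increment cannot be used, and it only needs
to bump a key that the parent socket has already enabled.
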
diff --git a/include/net/tcp_ao.h b/include/net/tcp_ao.h
index 04e3bcee05f7..253cf2719aed 100644
--- a/include/net/tcp_ao.h
+++ b/include/net/tcp_ao.h
@@ -131,6 +131,8 @@ do { \
#ifdef CONFIG_TCP_AO
/* TCP-AO structures and functions */
+#include <linux/jump_label.h>
+extern struct static_key_false_deferred tcp_ao_needed;
struct tcp4_ao_context {
__be32 saddr;
diff --git a/net/ipv4/tcp_ao.c b/net/ipv4/tcp_ao.c
index 2c38e991ecbd..adb25e42f64a 100644
--- a/net/ipv4/tcp_ao.c
+++ b/net/ipv4/tcp_ao.c
@@ -17,6 +17,9 @@
#include <net/ipv6.h>
#include <net/icmp.h>
+DEFINE_STATIC_KEY_DEFERRED_FALSE(tcp_ao_needed, HZ);
+EXPORT_SYMBOL(tcp_ao_needed);
+
int tcp_ao_calc_traffic_key(struct tcp_ao_key *mkt, u8 *key, void *ctx,
unsigned int len)
{
@@ -58,6 +61,9 @@ bool tcp_ao_ignore_icmp(struct sock *sk, int type, int code)
struct tcp_ao_info *ao;
bool ignore_icmp = false;
+ if (!static_branch_unlikely(&tcp_ao_needed.key))
+ return false;
+
/* RFC5925, 7.8:
* >> A TCP-AO implementation MUST default to ignore incoming ICMPv4
* messages of Type 3 (destination unreachable), Codes 2-4 (protocol
@@ -196,6 +202,9 @@ struct tcp_ao_key *tcp_ao_do_lookup(const struct sock *sk,
struct tcp_ao_key *key;
struct tcp_ao_info *ao;
+ if (!static_branch_unlikely(&tcp_ao_needed.key))
+ return NULL;
+
ao = rcu_dereference_check(tcp_sk(sk)->ao_info,
lockdep_sock_is_held(sk));
if (!ao)
@@ -283,6 +292,7 @@ void tcp_ao_destroy_sock(struct sock *sk, bool twsk)
}
kfree_rcu(ao, rcu);
+ static_branch_slow_dec_deferred(&tcp_ao_needed);
}
void tcp_ao_time_wait(struct tcp_timewait_sock *tcptw, struct tcp_sock *tp)
@@ -1052,6 +1062,11 @@ int tcp_ao_copy_all_matching(const struct sock *sk, struct sock *newsk,
goto free_and_exit;
}
+ if (!static_key_fast_inc_not_disabled(&tcp_ao_needed.key.key)) {
+ ret = -EUSERS;
+ goto free_and_exit;
+ }
+
key_head = rcu_dereference(hlist_first_rcu(&new_ao->head));
first_key = hlist_entry_safe(key_head, struct tcp_ao_key, node);
@@ -1607,6 +1622,8 @@ static int tcp_ao_add_cmd(struct sock *sk, unsigned short int family,
tcp_ao_link_mkt(ao_info, key);
if (first) {
+ if (!static_branch_inc(&tcp_ao_needed.key))
+ goto err_free_sock;
sk_gso_disable(sk);
rcu_assign_pointer(tcp_sk(sk)->ao_info, ao_info);
}
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 9f23cab1e835..dd9ff507bbc9 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3524,17 +3524,14 @@ static inline bool tcp_may_update_window(const struct tcp_sock *tp,
(ack_seq == tp->snd_wl1 && nwin > tp->snd_wnd);
}
-/* If we update tp->snd_una, also update tp->bytes_acked */
-static void tcp_snd_una_update(struct tcp_sock *tp, u32 ack)
+static void tcp_snd_sne_update(struct tcp_sock *tp, u32 ack)
{
- u32 delta = ack - tp->snd_una;
#ifdef CONFIG_TCP_AO
struct tcp_ao_info *ao;
-#endif
- sock_owned_by_me((struct sock *)tp);
- tp->bytes_acked += delta;
-#ifdef CONFIG_TCP_AO
+ if (!static_branch_unlikely(&tcp_ao_needed.key))
+ return;
+
ao = rcu_dereference_protected(tp->ao_info,
lockdep_sock_is_held((struct sock *)tp));
if (ao) {
@@ -3543,20 +3540,27 @@ static void tcp_snd_una_update(struct tcp_sock *tp, u32 ack)
ao->snd_sne_seq = ack;
}
#endif
+}
+
+/* If we update tp->snd_una, also update tp->bytes_acked */
+static void tcp_snd_una_update(struct tcp_sock *tp, u32 ack)
+{
+ u32 delta = ack - tp->snd_una;
+
+ sock_owned_by_me((struct sock *)tp);
+ tp->bytes_acked += delta;
+ tcp_snd_sne_update(tp, ack);
tp->snd_una = ack;
}
-/* If we update tp->rcv_nxt, also update tp->bytes_received */
-static void tcp_rcv_nxt_update(struct tcp_sock *tp, u32 seq)
+static void tcp_rcv_sne_update(struct tcp_sock *tp, u32 seq)
{
- u32 delta = seq - tp->rcv_nxt;
#ifdef CONFIG_TCP_AO
struct tcp_ao_info *ao;
-#endif
- sock_owned_by_me((struct sock *)tp);
- tp->bytes_received += delta;
-#ifdef CONFIG_TCP_AO
+ if (!static_branch_unlikely(&tcp_ao_needed.key))
+ return;
+
ao = rcu_dereference_protected(tp->ao_info,
lockdep_sock_is_held((struct sock *)tp));
if (ao) {
@@ -3565,6 +3569,16 @@ static void tcp_rcv_nxt_update(struct tcp_sock *tp, u32 seq)
ao->rcv_sne_seq = seq;
}
#endif
+}
+
+/* If we update tp->rcv_nxt, also update tp->bytes_received */
+static void tcp_rcv_nxt_update(struct tcp_sock *tp, u32 seq)
+{
+ u32 delta = seq - tp->rcv_nxt;
+
+ sock_owned_by_me((struct sock *)tp);
+ tp->bytes_received += delta;
+ tcp_rcv_sne_update(tp, seq);
WRITE_ONCE(tp->rcv_nxt, seq);
}
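
Since the last two hunks interleave the new helper with the function it is
split out of, here is approximately how the send-side pair reads once the
patch is applied. This is reconstructed from the hunks above purely for
readability (whitespace is approximate, and the body of the if (ao) block
falls between the two hunks, so it is elided); the receive-side pair,
tcp_rcv_sne_update()/tcp_rcv_nxt_update(), ends up symmetric.

/* Reconstructed from the hunks above for readability; not part of the patch. */
static void tcp_snd_sne_update(struct tcp_sock *tp, u32 ack)
{
#ifdef CONFIG_TCP_AO
        struct tcp_ao_info *ao;

        /* Patched-out jump for every connection without AO keys. */
        if (!static_branch_unlikely(&tcp_ao_needed.key))
                return;

        ao = rcu_dereference_protected(tp->ao_info,
                                       lockdep_sock_is_held((struct sock *)tp));
        if (ao) {
                /* ... lines between the two hunks above, elided ... */
                ao->snd_sne_seq = ack;
        }
#endif
}

/* If we update tp->snd_una, also update tp->bytes_acked */
static void tcp_snd_una_update(struct tcp_sock *tp, u32 ack)
{
        u32 delta = ack - tp->snd_una;

        sock_owned_by_me((struct sock *)tp);
        tp->bytes_acked += delta;
        tcp_snd_sne_update(tp, ack);
        tp->snd_una = ack;
}
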
--
2.39.1