Message-id: <20181214224007.54813-2-cpaasch@apple.com>
Date: Fri, 14 Dec 2018 14:40:03 -0800
From: Christoph Paasch <cpaasch@...le.com>
To: netdev@...r.kernel.org
Cc: Eric Dumazet <edumazet@...gle.com>,
Yuchung Cheng <ycheng@...gle.com>,
David Miller <davem@...emloft.net>
Subject: [PATCH net-next 1/5] tcp: Create list of TFO-contexts

Instead of a single TFO-context, we now maintain a list of
tcp_fastopen_context structures, bounded by TCP_FASTOPEN_CTXT_LEN (set
to 2). This enables a rolling TFO-key update, where the server still
accepts cookies generated from the old key while already announcing the
new one to clients (see follow-up patch).
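
For illustration (editorial sketch, not part of this patch): a server can
roll its key through the existing TCP_FASTOPEN_KEY socket option, which
takes one 16-byte key; rotate_tfo_key() below is a hypothetical helper. The
net.ipv4.tcp_fastopen_key sysctl takes the sk == NULL path in the code
below and gets the same two-context behaviour.

    #include <stdint.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <netinet/tcp.h>

    #ifndef TCP_FASTOPEN_KEY
    #define TCP_FASTOPEN_KEY 33 /* value from include/uapi/linux/tcp.h */
    #endif

    /* Install 'key' as the new primary TFO key on a listener socket.
     * With this series applied, the kernel keeps the previous key as a
     * second context, so cookies minted under the old key stay valid
     * while clients pick up the new one.
     */
    static int rotate_tfo_key(int listen_fd, const uint8_t key[16])
    {
            return setsockopt(listen_fd, IPPROTO_TCP, TCP_FASTOPEN_KEY,
                              key, 16);
    }
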
Signed-off-by: Christoph Paasch <cpaasch@...le.com>
---
include/net/tcp.h | 2 ++
net/ipv4/tcp_fastopen.c | 52 +++++++++++++++++++++++++++++++++++++++++++++----
2 files changed, 50 insertions(+), 4 deletions(-)

diff --git a/include/net/tcp.h b/include/net/tcp.h
index e0a65c067662..e629ea2e6c9d 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1622,9 +1622,11 @@ bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
 			     struct tcp_fastopen_cookie *cookie);
 bool tcp_fastopen_defer_connect(struct sock *sk, int *err);
 #define TCP_FASTOPEN_KEY_LENGTH 16
+#define TCP_FASTOPEN_CTXT_LEN	2
 
 /* Fastopen key context */
 struct tcp_fastopen_context {
+	struct tcp_fastopen_context __rcu *next;
 	struct crypto_cipher	*tfm;
 	__u8			key[TCP_FASTOPEN_KEY_LENGTH];
 	struct rcu_head		rcu;
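
As an aside (editorial sketch, not part of the patch): with the __rcu
->next pointer, a reader can try all currently valid keys while writers
concurrently prepend and trim under tcp_fastopen_ctx_lock. Something along
these lines, where check_cookie_with_ctx() is a hypothetical stand-in for
the actual cookie computation and compare:

    struct tcp_fastopen_context *ctx;
    bool ok = false;

    rcu_read_lock();
    for (ctx = rcu_dereference(q->ctx); ctx;
         ctx = rcu_dereference(ctx->next)) {
            ok = check_cookie_with_ctx(req, ctx);
            if (ok)
                    break;
    }
    rcu_read_unlock();
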
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 018a48477355..c52d5b8eabf0 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -37,8 +37,14 @@ static void tcp_fastopen_ctx_free(struct rcu_head *head)
 {
 	struct tcp_fastopen_context *ctx =
 		container_of(head, struct tcp_fastopen_context, rcu);
-	crypto_free_cipher(ctx->tfm);
-	kfree(ctx);
+
+	while (ctx) {
+		struct tcp_fastopen_context *prev = ctx;
+		/* We own ctx, thus no need to hold the Fastopen-lock */
+		ctx = rcu_dereference_protected(ctx->next, 1);
+		crypto_free_cipher(prev->tfm);
+		kfree(prev);
+	}
 }
 
 void tcp_fastopen_destroy_cipher(struct sock *sk)
@@ -66,6 +72,35 @@ void tcp_fastopen_ctx_destroy(struct net *net)
 		call_rcu(&ctxt->rcu, tcp_fastopen_ctx_free);
 }
 
+static struct tcp_fastopen_context *
+tcp_fastopen_cut_keypool(struct tcp_fastopen_context *ctx,
+			 spinlock_t *lock)
+{
+	int cnt = 0;
+
+	while (ctx) {
+		/* We iterate the list to see if we have more than
+		 * TCP_FASTOPEN_CTXT_LEN contexts. If we do, we remove the rest
+		 * of the list and free it later
+		 */
+
+		cnt++;
+		if (cnt >= TCP_FASTOPEN_CTXT_LEN) {
+			/* It's the last one, return the rest so it gets freed */
+			struct tcp_fastopen_context *prev = ctx;
+
+			ctx = rcu_dereference_protected(ctx->next,
+							lockdep_is_held(lock));
+			rcu_assign_pointer(prev->next, NULL);
+			break;
+		}
+		ctx = rcu_dereference_protected(ctx->next,
+						lockdep_is_held(lock));
+	}
+
+	return ctx;
+}
+
 int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
 			      void *key, unsigned int len)
 {
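
To see the trimming behaviour in isolation, here is a self-contained
userspace model of the keypool cut (editorial sketch: plain pointers
replace the __rcu accessors, and freeing happens inline instead of via
call_rcu()):

    #include <assert.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define CTXT_LEN 2 /* mirrors TCP_FASTOPEN_CTXT_LEN */

    struct ctx {
            struct ctx *next;
            int id;
    };

    /* Walk the list; once CTXT_LEN nodes have been seen, detach and
     * return the remainder so the caller can free it (the kernel
     * defers that to call_rcu()).
     */
    static struct ctx *cut_keypool(struct ctx *head)
    {
            int cnt = 0;

            while (head) {
                    cnt++;
                    if (cnt >= CTXT_LEN) {
                            struct ctx *rest = head->next;

                            head->next = NULL;
                            return rest;
                    }
                    head = head->next;
            }
            return NULL;
    }

    int main(void)
    {
            struct ctx *head = NULL;

            /* Install three keys; each new one is prepended, as in
             * tcp_fastopen_reset_cipher().
             */
            for (int id = 0; id < 3; id++) {
                    struct ctx *c = calloc(1, sizeof(*c));
                    struct ctx *rest;

                    c->id = id;
                    c->next = head;
                    head = c;
                    rest = cut_keypool(head);
                    while (rest) { /* "free it later" */
                            struct ctx *prev = rest;

                            rest = rest->next;
                            printf("freeing ctx %d\n", prev->id);
                            free(prev);
                    }
            }
            /* Only the two newest contexts remain: ids 2 and 1. */
            assert(head->id == 2 && head->next->id == 1 &&
                   !head->next->next);
            return 0;
    }
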
@@ -96,13 +131,22 @@ error:		kfree(ctx);
 	spin_lock(&net->ipv4.tcp_fastopen_ctx_lock);
 	if (sk) {
 		q = &inet_csk(sk)->icsk_accept_queue.fastopenq;
+		rcu_assign_pointer(ctx->next, q->ctx);
+		rcu_assign_pointer(q->ctx, ctx);
+
 		octx = rcu_dereference_protected(q->ctx,
 			lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
-		rcu_assign_pointer(q->ctx, ctx);
+
+		octx = tcp_fastopen_cut_keypool(octx, &net->ipv4.tcp_fastopen_ctx_lock);
 	} else {
+		rcu_assign_pointer(ctx->next, net->ipv4.tcp_fastopen_ctx);
+		rcu_assign_pointer(net->ipv4.tcp_fastopen_ctx, ctx);
+
 		octx = rcu_dereference_protected(net->ipv4.tcp_fastopen_ctx,
 			lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
-		rcu_assign_pointer(net->ipv4.tcp_fastopen_ctx, ctx);
+
+		octx = tcp_fastopen_cut_keypool(octx,
+						&net->ipv4.tcp_fastopen_ctx_lock);
 	}
 	spin_unlock(&net->ipv4.tcp_fastopen_ctx_lock);
 
--
2.16.2