Message-Id: <20180814141758.4930-1-vakul.garg@nxp.com>
Date: Tue, 14 Aug 2018 19:47:58 +0530
From: Vakul Garg <vakul.garg@....com>
To: netdev@...r.kernel.org
Cc: borisp@...lanox.com, aviadye@...lanox.com, davejwatson@...com,
davem@...emloft.net, Vakul Garg <vakul.garg@....com>
Subject: [PATCH net-next][RFC] net/tls: Add support for async decryption of tls records

Incoming TLS records which are decrypted directly into the user space
application buffer, i.e. records decrypted in zero-copy mode, are
submitted for async decryption. When the decryption crypto api returns
-EINPROGRESS, the next TLS record is parsed and then submitted for
decryption. The references to records which have been sent for async
decryption are dropped. This happens in a loop for all the records
that can be decrypted in zero-copy mode. For records which cannot be
decrypted in zero-copy mode, async decryption is not used; instead we
wait for the decryption crypto api to complete.

For crypto requests executing asynchronously, the memory for the
aead_request, scatterlists, skb etc. is freed from the decryption
completion handler. The completion handler also wakes up the sleeping
user context; this happens once the user context is done enqueueing
all the crypto requests and is waiting for all the outstanding async
operations to finish. Since the splice() operation does not use
zero-copy decryption, async decryption remains disabled for splice().
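
A note on the wakeup ordering: the completion must not be missed even
though the completion handler runs asynchronously. This relies on the
following pairing (CPU0 is the user context in recv_end, CPU1 is a
decryption completion):

	CPU0 (recv_end)                 CPU1 (tls_decrypt_done)
	------------------------------  -------------------------------
	smp_store_mb(async_notify, 1)   pending = atomic_dec_return()
	atomic_read(decrypt_pending)    READ_ONCE(async_notify)

Both sides fully order their store/RMW before the subsequent load, so
the last completion cannot read async_notify as false while the user
context reads a stale non-zero decrypt_pending and sleeps forever;
either the user context observes the final decrement and does not
sleep, or the completion observes async_notify and calls complete().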
Signed-off-by: Vakul Garg <vakul.garg@....com>
---
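Notes (not part of the commit message):

The submit-then-wait pattern described above -- fire one crypto
request per zero-copy record, keep parsing on -EINPROGRESS, and block
only once in recv_end until decrypt_pending drains -- can be modelled
in plain userspace C. The sketch below is only an analogue for
reviewers, not kernel code: pthreads stand in for the async crypto
engine, a condition variable stands in for ctx->async_wait.completion,
and every identifier (fake_decrypt, all_done, NUM_RECORDS, ...) is
invented for the illustration.

/*
 * Standalone userspace analogue of the async decrypt flow
 * (build: gcc -pthread -o sketch sketch.c). Only the shape of the
 * logic mirrors tls_do_decryption()/tls_decrypt_done()/recv_end.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define NUM_RECORDS 8

static atomic_int decrypt_pending;	/* like ctx->decrypt_pending */
static atomic_bool async_notify;	/* like ctx->async_notify */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t all_done = PTHREAD_COND_INITIALIZER;

/* Worker thread: plays the crypto engine plus tls_decrypt_done(). */
static void *fake_decrypt(void *arg)
{
	usleep(1000 * ((long)arg % 5));		/* "decrypt" one record */

	/* Completion handler: request resources would be freed here... */
	int pending = atomic_fetch_sub(&decrypt_pending, 1) - 1;

	/* ...then wake the user context only if it announced it waits. */
	if (pending == 0 && atomic_load(&async_notify)) {
		pthread_mutex_lock(&lock);
		pthread_cond_signal(&all_done);
		pthread_mutex_unlock(&lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t th[NUM_RECORDS];
	long rec;

	/* recvmsg() loop: submit each zero-copy record, never block. */
	for (rec = 0; rec < NUM_RECORDS; rec++) {
		atomic_fetch_add(&decrypt_pending, 1);
		pthread_create(&th[rec], NULL, fake_decrypt, (void *)rec);
		/* -EINPROGRESS: move straight on to the next record. */
	}

	/* recv_end: announce that we wait, then sleep until drained. */
	pthread_mutex_lock(&lock);
	atomic_store(&async_notify, true);	/* ~smp_store_mb() */
	while (atomic_load(&decrypt_pending) > 0)
		pthread_cond_wait(&all_done, &lock);
	pthread_mutex_unlock(&lock);
	atomic_store(&async_notify, false);

	for (rec = 0; rec < NUM_RECORDS; rec++)
		pthread_join(th[rec], NULL);
	printf("all %d records decrypted\n", NUM_RECORDS);
	return 0;
}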
include/net/tls.h | 6 +++
net/tls/tls_sw.c | 134 +++++++++++++++++++++++++++++++++++++++++++++++++-----
2 files changed, 129 insertions(+), 11 deletions(-)
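
For context, none of this changes the userspace contract: an
application enables the kernel TLS receive path with setsockopt() and
then issues plain recv()/recvmsg() calls, and a large enough
iovec-based read (not a kvec, and without MSG_PEEK) is what makes a
record eligible for the zero-copy, and hence async, path. A minimal RX
setup sketch follows; the uapi names are from <linux/tls.h> on kernels
with TLS_RX support, the fallback defines carry the standard constant
values, and the key material arguments are placeholders that must come
from a userspace TLS handshake.

/* Sketch: enable kTLS receive on an established, already-handshaken
 * TLS 1.2 connection over TCP. */
#include <linux/tls.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>
#include <string.h>

#ifndef TCP_ULP
#define TCP_ULP 31
#endif
#ifndef SOL_TLS
#define SOL_TLS 282
#endif

static int enable_tls_rx(int sock,
			 const unsigned char *key, const unsigned char *iv,
			 const unsigned char *salt, const unsigned char *seq)
{
	struct tls12_crypto_info_aes_gcm_128 ci;

	memset(&ci, 0, sizeof(ci));
	ci.info.version = TLS_1_2_VERSION;
	ci.info.cipher_type = TLS_CIPHER_AES_GCM_128;
	memcpy(ci.key, key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	memcpy(ci.iv, iv, TLS_CIPHER_AES_GCM_128_IV_SIZE);
	memcpy(ci.salt, salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	memcpy(ci.rec_seq, seq, TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);

	/* Attach the "tls" ULP, then hand the RX keys to the kernel. */
	if (setsockopt(sock, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")) < 0)
		return -1;
	return setsockopt(sock, SOL_TLS, TLS_RX, &ci, sizeof(ci));
}

/* From here on, a plain recv(sock, buf, len, 0) into a large buffer
 * returns decrypted record payload and can take the zero-copy path. */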
diff --git a/include/net/tls.h b/include/net/tls.h
index d5c683e8bb22..cd0a65bd92f9 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -124,6 +124,12 @@ struct tls_sw_context_rx {
struct sk_buff *recv_pkt;
u8 control;
bool decrypted;
+ atomic_t decrypt_pending;
+ bool async_notify;
+};
+
+struct decrypt_req_ctx {
+ struct sock *sk;
};
struct tls_record_info {
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 52fbe727d7c1..e2f0df18b6cf 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -43,12 +43,50 @@
#define MAX_IV_SIZE TLS_CIPHER_AES_GCM_128_IV_SIZE
+static void tls_decrypt_done(struct crypto_async_request *req, int err)
+{
+ struct aead_request *aead_req = (struct aead_request *)req;
+ struct decrypt_req_ctx *req_ctx =
+ (struct decrypt_req_ctx *)(aead_req + 1);
+
+ struct scatterlist *sgout = aead_req->dst;
+
+ struct tls_context *tls_ctx = tls_get_ctx(req_ctx->sk);
+ struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
+ int pending = atomic_dec_return(&ctx->decrypt_pending);
+ struct scatterlist *sg;
+ unsigned int pages;
+
+ /* Propagate if there was an err */
+ if (err) {
+ ctx->async_wait.err = err;
+ tls_err_abort(req_ctx->sk, err);
+ }
+
+ /* Release the skb, pages and memory allocated for crypto req */
+ kfree_skb(req->data);
+
+ /* Skip the first S/G entry as it points to AAD */
+ for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
+ if (!sg)
+ break;
+ put_page(sg_page(sg));
+ }
+
+ kfree(aead_req);
+
+ if (!pending && READ_ONCE(ctx->async_notify))
+ complete(&ctx->async_wait.completion);
+}
+
static int tls_do_decryption(struct sock *sk,
+ struct sk_buff *skb,
struct scatterlist *sgin,
struct scatterlist *sgout,
char *iv_recv,
size_t data_len,
- struct aead_request *aead_req)
+ struct aead_request *aead_req,
+ bool async)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
@@ -59,10 +97,34 @@ static int tls_do_decryption(struct sock *sk,
aead_request_set_crypt(aead_req, sgin, sgout,
data_len + tls_ctx->rx.tag_size,
(u8 *)iv_recv);
- aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- crypto_req_done, &ctx->async_wait);
- ret = crypto_wait_req(crypto_aead_decrypt(aead_req), &ctx->async_wait);
+ if (async) {
+ struct decrypt_req_ctx *req_ctx;
+
+ req_ctx = (struct decrypt_req_ctx *)(aead_req + 1);
+ req_ctx->sk = sk;
+
+ aead_request_set_callback(aead_req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ tls_decrypt_done, skb);
+ atomic_inc(&ctx->decrypt_pending);
+ } else {
+ aead_request_set_callback(aead_req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &ctx->async_wait);
+ }
+
+ ret = crypto_aead_decrypt(aead_req);
+ if (ret == -EINPROGRESS) {
+ if (async)
+ return ret;
+
+ ret = crypto_wait_req(ret, &ctx->async_wait);
+ }
+
+ if (async)
+ atomic_dec(&ctx->decrypt_pending);
+
return ret;
}
@@ -763,7 +825,10 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
}
/* Prepare and submit AEAD request */
- err = tls_do_decryption(sk, sgin, sgout, iv, data_len, aead_req);
+ err = tls_do_decryption(sk, skb, sgin, sgout, iv,
+ data_len, aead_req, *zc);
+ if (err == -EINPROGRESS)
+ return err;
/* Release the pages in case iov was mapped to pages */
for (; pages > 0; pages--)
@@ -788,8 +853,12 @@ static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
#endif
if (!ctx->decrypted) {
err = decrypt_internal(sk, skb, dest, NULL, chunk, zc);
- if (err < 0)
+ if (err < 0) {
+ if (err == -EINPROGRESS)
+ tls_advance_record_sn(sk, &tls_ctx->rx);
+
return err;
+ }
} else {
*zc = false;
}
@@ -851,6 +920,7 @@ int tls_sw_recvmsg(struct sock *sk,
int target, err = 0;
long timeo;
bool is_kvec = msg->msg_iter.type & ITER_KVEC;
+ int num_async = 0;
flags |= nonblock;
@@ -862,7 +932,10 @@ int tls_sw_recvmsg(struct sock *sk,
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
do {
+ int full_len;
+ int offset;
bool zc = false;
+ bool async = false;
int chunk = 0;
skb = tls_wait_data(sk, flags, timeo, &err);
@@ -870,6 +943,9 @@ int tls_sw_recvmsg(struct sock *sk,
goto recv_end;
rxm = strp_msg(skb);
+ full_len = rxm->full_len;
+ offset = rxm->offset;
+
if (!cmsg) {
int cerr;
@@ -888,7 +964,7 @@ int tls_sw_recvmsg(struct sock *sk,
}
if (!ctx->decrypted) {
- int to_copy = rxm->full_len - tls_ctx->rx.overhead_size;
+ int to_copy = full_len - tls_ctx->rx.overhead_size;
if (!is_kvec && to_copy <= len &&
likely(!(flags & MSG_PEEK)))
@@ -896,42 +972,76 @@ int tls_sw_recvmsg(struct sock *sk,
err = decrypt_skb_update(sk, skb, &msg->msg_iter,
&chunk, &zc);
- if (err < 0) {
+ if (err < 0 && err != -EINPROGRESS) {
tls_err_abort(sk, EBADMSG);
goto recv_end;
}
+
+ if (err == -EINPROGRESS) {
+ async = true;
+ num_async++;
+ goto pick_next_record;
+ }
+
ctx->decrypted = true;
}
if (!zc) {
- chunk = min_t(unsigned int, rxm->full_len, len);
- err = skb_copy_datagram_msg(skb, rxm->offset, msg,
+ chunk = min_t(unsigned int, full_len, len);
+
+ err = skb_copy_datagram_msg(skb, offset, msg,
chunk);
if (err < 0)
goto recv_end;
}
+pick_next_record:
copied += chunk;
len -= chunk;
if (likely(!(flags & MSG_PEEK))) {
u8 control = ctx->control;
- if (tls_sw_advance_skb(sk, skb, chunk)) {
+ if (async) {
+ /* Finished with current record, pick up next */
+ ctx->recv_pkt = NULL;
+ __strp_unpause(&ctx->strp);
+ goto mark_eor_chk_ctrl;
+ } else if (tls_sw_advance_skb(sk, skb, chunk)) {
/* Return full control message to
* userspace before trying to parse
* another message type
*/
+mark_eor_chk_ctrl:
msg->msg_flags |= MSG_EOR;
if (control != TLS_RECORD_TYPE_DATA)
goto recv_end;
+ } else {
+ break;
}
}
+
/* If we have a new message from strparser, continue now. */
if (copied >= target && !ctx->recv_pkt)
break;
} while (len);
recv_end:
+ if (num_async) {
+ /* Wait for all previously submitted records to be decrypted */
+ smp_store_mb(ctx->async_notify, true);
+ if (atomic_read(&ctx->decrypt_pending)) {
+ err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
+ if (err) {
+ /* one of async decrypt failed */
+ tls_err_abort(sk, err);
+ copied = 0;
+ }
+ } else {
+ reinit_completion(&ctx->async_wait.completion);
+ }
+ WRITE_ONCE(ctx->async_notify, false);
+ }
+
release_sock(sk);
return copied ? : err;
}
@@ -1271,6 +1381,8 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
goto free_aead;
if (sw_ctx_rx) {
+ (*aead)->reqsize = sizeof(struct decrypt_req_ctx);
+
/* Set up strparser */
memset(&cb, 0, sizeof(cb));
cb.rcv_msg = tls_queue;
--
2.13.6