Message-Id: <9681d1febfec295449a62300938ed2ae66983f28.1694018970.git.sd@queasysnail.net>
Date: Wed,  6 Sep 2023 19:08:31 +0200
From: Sabrina Dubroca <sd@...asysnail.net>
To: netdev@...r.kernel.org
Cc: Sabrina Dubroca <sd@...asysnail.net>,
	Dave Watson <davejwatson@...com>,
	Jakub Kicinski <kuba@...nel.org>,
	Vakul Garg <vakul.garg@....com>,
	Boris Pismenny <borisp@...dia.com>,
	John Fastabend <john.fastabend@...il.com>
Subject: [PATCH net 1/5] net: tls: handle -EBUSY on async encrypt/decrypt requests

Since we're setting the CRYPTO_TFM_REQ_MAY_BACKLOG flag on our
requests to the crypto API, crypto_aead_{encrypt,decrypt} can return
-EBUSY instead of -EINPROGRESS in valid situations. For example, when
the cryptd queue for AESNI is full (easy to trigger with an
artificially low cryptd.cryptd_max_cpu_qlen), requests will be enqueued
to the backlog but still processed. In that case, the async callback
will also be called twice: first with err == -EINPROGRESS, which it
seems we can just ignore, then with err == 0.

I've only tested this on AESNI with cryptd.
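
For context, here is a minimal sketch (not part of the patch) of the
calling pattern the crypto API expects with CRYPTO_TFM_REQ_MAY_BACKLOG
set. The names my_complete(), my_submit() and the ctx argument are
hypothetical; only the return-code handling mirrors what this patch
adds to tls_sw.c:

	#include <crypto/aead.h>
	#include <linux/errno.h>

	static void my_complete(void *data, int err)
	{
		/* A backlogged request notifies twice: first with
		 * -EINPROGRESS once it leaves the backlog, then with
		 * the final result. Ignore the intermediate call.
		 */
		if (err == -EINPROGRESS)
			return;

		/* ... handle the final result (0 or a real error) ... */
	}

	static int my_submit(struct aead_request *req, void *ctx)
	{
		int rc;

		aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					  my_complete, ctx);

		rc = crypto_aead_encrypt(req);
		/* -EINPROGRESS: queued; -EBUSY: placed on the backlog
		 * but still processed. In both cases the callback
		 * delivers the final result.
		 */
		if (rc == -EINPROGRESS || rc == -EBUSY)
			return 0;

		return rc;	/* completed synchronously, or real error */
	}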

Fixes: a54667f6728c ("tls: Add support for encryption using async offload accelerator")
Fixes: 94524d8fc965 ("net/tls: Add support for async decryption of tls records")
Signed-off-by: Sabrina Dubroca <sd@...asysnail.net>
---
 net/tls/tls_sw.c | 23 +++++++++++++++--------
 1 file changed, 15 insertions(+), 8 deletions(-)

diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 1ed4a611631f..4f3dd0403efb 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -196,6 +196,9 @@ static void tls_decrypt_done(void *data, int err)
 	struct sock *sk;
 	int aead_size;
 
+	if (err == -EINPROGRESS)
+		return;
+
 	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(aead);
 	aead_size = ALIGN(aead_size, __alignof__(*dctx));
 	dctx = (void *)((u8 *)aead_req + aead_size);
@@ -261,7 +264,7 @@ static int tls_do_decryption(struct sock *sk,
 	}
 
 	ret = crypto_aead_decrypt(aead_req);
-	if (ret == -EINPROGRESS) {
+	if (ret == -EINPROGRESS || ret == -EBUSY) {
 		if (darg->async)
 			return 0;
 
@@ -443,6 +446,9 @@ static void tls_encrypt_done(void *data, int err)
 	struct sock *sk;
 	int pending;
 
+	if (err == -EINPROGRESS)
+		return;
+
 	msg_en = &rec->msg_encrypted;
 
 	sk = rec->sk;
@@ -544,7 +550,7 @@ static int tls_do_encryption(struct sock *sk,
 	atomic_inc(&ctx->encrypt_pending);
 
 	rc = crypto_aead_encrypt(aead_req);
-	if (!rc || rc != -EINPROGRESS) {
+	if (!rc || (rc != -EINPROGRESS && rc != -EBUSY)) {
 		atomic_dec(&ctx->encrypt_pending);
 		sge->offset -= prot->prepend_size;
 		sge->length += prot->prepend_size;
@@ -552,7 +558,7 @@ static int tls_do_encryption(struct sock *sk,
 
 	if (!rc) {
 		WRITE_ONCE(rec->tx_ready, true);
-	} else if (rc != -EINPROGRESS) {
+	} else if (rc != -EINPROGRESS && rc != -EBUSY) {
 		list_del(&rec->list);
 		return rc;
 	}
@@ -779,7 +785,7 @@ static int tls_push_record(struct sock *sk, int flags,
 	rc = tls_do_encryption(sk, tls_ctx, ctx, req,
 			       msg_pl->sg.size + prot->tail_size, i);
 	if (rc < 0) {
-		if (rc != -EINPROGRESS) {
+		if (rc != -EINPROGRESS && rc != -EBUSY) {
 			tls_err_abort(sk, -EBADMSG);
 			if (split) {
 				tls_ctx->pending_open_record_frags = true;
@@ -990,7 +996,7 @@ static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg,
 	if (unlikely(msg->msg_controllen)) {
 		ret = tls_process_cmsg(sk, msg, &record_type);
 		if (ret) {
-			if (ret == -EINPROGRESS)
+			if (ret == -EINPROGRESS || ret == -EBUSY)
 				num_async++;
 			else if (ret != -EAGAIN)
 				goto send_end;
@@ -1071,7 +1077,7 @@ static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg,
 						  record_type, &copied,
 						  msg->msg_flags);
 			if (ret) {
-				if (ret == -EINPROGRESS)
+				if (ret == -EINPROGRESS || ret == -EBUSY)
 					num_async++;
 				else if (ret == -ENOMEM)
 					goto wait_for_memory;
@@ -1125,7 +1131,7 @@ static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg,
 						  record_type, &copied,
 						  msg->msg_flags);
 			if (ret) {
-				if (ret == -EINPROGRESS)
+				if (ret == -EINPROGRESS || ret == -EBUSY)
 					num_async++;
 				else if (ret == -ENOMEM)
 					goto wait_for_memory;
@@ -1248,6 +1254,7 @@ void tls_sw_splice_eof(struct socket *sock)
 			goto unlock;
 		retrying = true;
 		goto retry;
+	case -EBUSY:
 	case -EINPROGRESS:
 		break;
 	default:
@@ -2106,7 +2113,7 @@ int tls_sw_recvmsg(struct sock *sk,
 		__skb_queue_purge(&ctx->async_hold);
 
 		if (ret) {
-			if (err >= 0 || err == -EINPROGRESS)
+			if (err >= 0 || err == -EINPROGRESS || err == -EBUSY)
 				err = ret;
 			decrypted = 0;
 			goto end;
-- 
2.40.1

