Date:   Wed, 26 Sep 2018 16:22:08 +0530
From:   Vakul Garg <vakul.garg@....com>
To:     netdev@...r.kernel.org
Cc:     borisp@...lanox.com, aviadye@...lanox.com, davejwatson@...com,
        davem@...emloft.net, doronrk@...com,
        Vakul Garg <vakul.garg@....com>
Subject: [PATCH net-next] tls: Remove redundant vars from tls record structure

Structure 'tls_rec' contains sg_aead_in and sg_aead_out, which point
to aad_space and then chain the scatterlists sg_plaintext_data and
sg_encrypted_data respectively. Rather than using chained scatterlists
for the plaintext and encrypted data in aead_req, it is more efficient
to store aad_space in the first index of sg_plaintext_data and
sg_encrypted_data themselves and get rid of sg_aead_in, sg_aead_out
and the chaining.

This requires increasing the size of the sg_plaintext_data and
sg_encrypted_data arrays by 1 to accommodate the entry for aad_space.
The code which uses sg_plaintext_data and sg_encrypted_data has been
modified to skip the first index as it points to aad_space.
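
To illustrate the new layout (a sketch only, not part of the patch;
names are taken from the hunks below): index 0 of each array carries
the AAD and the data fragments start at index 1, so the arrays can be
passed to the AEAD request directly without any sg_chain():

	/* sketch: set up in get_rec(), index 0 of both arrays holds AAD */
	sg_set_buf(&rec->sg_plaintext_data[0], rec->aad_space,
		   sizeof(rec->aad_space));
	sg_set_buf(&rec->sg_encrypted_data[0], rec->aad_space,
		   sizeof(rec->aad_space));

	/* sketch: tls_do_encryption() then uses the arrays as-is,
	 * no separate sg_aead_in/sg_aead_out and no chaining
	 */
	aead_request_set_crypt(aead_req, rec->sg_plaintext_data,
			       rec->sg_encrypted_data,
			       data_len, tls_ctx->tx.iv);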

Signed-off-by: Vakul Garg <vakul.garg@....com>
---
 include/net/tls.h |  6 ++--
 net/tls/tls_sw.c  | 92 ++++++++++++++++++++++++++-----------------------------
 2 files changed, 45 insertions(+), 53 deletions(-)

diff --git a/include/net/tls.h b/include/net/tls.h
index 1615fb5ea114..262420cdad10 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -101,13 +101,11 @@ struct tls_rec {
 	struct list_head list;
 	int tx_ready;
 	int tx_flags;
-	struct scatterlist sg_plaintext_data[MAX_SKB_FRAGS];
-	struct scatterlist sg_encrypted_data[MAX_SKB_FRAGS];
 
 	/* AAD | sg_plaintext_data | sg_tag */
-	struct scatterlist sg_aead_in[2];
+	struct scatterlist sg_plaintext_data[MAX_SKB_FRAGS + 1];
 	/* AAD | sg_encrypted_data (data contain overhead for hdr&iv&tag) */
-	struct scatterlist sg_aead_out[2];
+	struct scatterlist sg_encrypted_data[MAX_SKB_FRAGS + 1];
 
 	unsigned int sg_plaintext_size;
 	unsigned int sg_encrypted_size;
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 4c18b4dba284..8cf7bef7c5a2 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -248,7 +248,7 @@ static void trim_both_sgl(struct sock *sk, int target_size)
 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
 	struct tls_rec *rec = ctx->open_rec;
 
-	trim_sg(sk, rec->sg_plaintext_data,
+	trim_sg(sk, &rec->sg_plaintext_data[1],
 		&rec->sg_plaintext_num_elem,
 		&rec->sg_plaintext_size,
 		target_size);
@@ -256,7 +256,7 @@ static void trim_both_sgl(struct sock *sk, int target_size)
 	if (target_size > 0)
 		target_size += tls_ctx->tx.overhead_size;
 
-	trim_sg(sk, rec->sg_encrypted_data,
+	trim_sg(sk, &rec->sg_encrypted_data[1],
 		&rec->sg_encrypted_num_elem,
 		&rec->sg_encrypted_size,
 		target_size);
@@ -270,12 +270,13 @@ static int alloc_encrypted_sg(struct sock *sk, int len)
 	int rc = 0;
 
 	rc = sk_alloc_sg(sk, len,
-			 rec->sg_encrypted_data, 0,
+			 &rec->sg_encrypted_data[1], 0,
 			 &rec->sg_encrypted_num_elem,
 			 &rec->sg_encrypted_size, 0);
 
 	if (rc == -ENOSPC)
-		rec->sg_encrypted_num_elem = ARRAY_SIZE(rec->sg_encrypted_data);
+		rec->sg_encrypted_num_elem =
+			ARRAY_SIZE(rec->sg_encrypted_data) - 1;
 
 	return rc;
 }
@@ -287,12 +288,15 @@ static int alloc_plaintext_sg(struct sock *sk, int len)
 	struct tls_rec *rec = ctx->open_rec;
 	int rc = 0;
 
-	rc = sk_alloc_sg(sk, len, rec->sg_plaintext_data, 0,
-			 &rec->sg_plaintext_num_elem, &rec->sg_plaintext_size,
+	rc = sk_alloc_sg(sk, len,
+			 &rec->sg_plaintext_data[1], 0,
+			 &rec->sg_plaintext_num_elem,
+			 &rec->sg_plaintext_size,
 			 tls_ctx->pending_open_record_frags);
 
 	if (rc == -ENOSPC)
-		rec->sg_plaintext_num_elem = ARRAY_SIZE(rec->sg_plaintext_data);
+		rec->sg_plaintext_num_elem =
+			ARRAY_SIZE(rec->sg_plaintext_data) - 1;
 
 	return rc;
 }
@@ -320,11 +324,11 @@ static void tls_free_open_rec(struct sock *sk)
 	if (!rec)
 		return;
 
-	free_sg(sk, rec->sg_encrypted_data,
+	free_sg(sk, &rec->sg_encrypted_data[1],
 		&rec->sg_encrypted_num_elem,
 		&rec->sg_encrypted_size);
 
-	free_sg(sk, rec->sg_plaintext_data,
+	free_sg(sk, &rec->sg_plaintext_data[1],
 		&rec->sg_plaintext_num_elem,
 		&rec->sg_plaintext_size);
 
@@ -355,7 +359,7 @@ int tls_tx_records(struct sock *sk, int flags)
 		 * Remove the head of tx_list
 		 */
 		list_del(&rec->list);
-		free_sg(sk, rec->sg_plaintext_data,
+		free_sg(sk, &rec->sg_plaintext_data[1],
 			&rec->sg_plaintext_num_elem, &rec->sg_plaintext_size);
 
 		kfree(rec);
@@ -370,13 +374,13 @@ int tls_tx_records(struct sock *sk, int flags)
 				tx_flags = flags;
 
 			rc = tls_push_sg(sk, tls_ctx,
-					 &rec->sg_encrypted_data[0],
+					 &rec->sg_encrypted_data[1],
 					 0, tx_flags);
 			if (rc)
 				goto tx_err;
 
 			list_del(&rec->list);
-			free_sg(sk, rec->sg_plaintext_data,
+			free_sg(sk, &rec->sg_plaintext_data[1],
 				&rec->sg_plaintext_num_elem,
 				&rec->sg_plaintext_size);
 
@@ -405,16 +409,12 @@ static void tls_encrypt_done(struct crypto_async_request *req, int err)
 
 	rec = container_of(aead_req, struct tls_rec, aead_req);
 
-	rec->sg_encrypted_data[0].offset -= tls_ctx->tx.prepend_size;
-	rec->sg_encrypted_data[0].length += tls_ctx->tx.prepend_size;
+	rec->sg_encrypted_data[1].offset -= tls_ctx->tx.prepend_size;
+	rec->sg_encrypted_data[1].length += tls_ctx->tx.prepend_size;
 
 
-	/* Free the record if error is previously set on socket */
+	/* Check if error is previously set on socket */
 	if (err || sk->sk_err) {
-		free_sg(sk, rec->sg_encrypted_data,
-			&rec->sg_encrypted_num_elem, &rec->sg_encrypted_size);
-
-		kfree(rec);
 		rec = NULL;
 
 		/* If err is already set on socket, return the same code */
@@ -449,7 +449,7 @@ static void tls_encrypt_done(struct crypto_async_request *req, int err)
 
 	/* Schedule the transmission */
 	if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
-		schedule_delayed_work(&ctx->tx_work.work, 1);
+		schedule_delayed_work(&ctx->tx_work.work, 2);
 }
 
 static int tls_do_encryption(struct sock *sk,
@@ -461,13 +461,14 @@ static int tls_do_encryption(struct sock *sk,
 	struct tls_rec *rec = ctx->open_rec;
 	int rc;
 
-	rec->sg_encrypted_data[0].offset += tls_ctx->tx.prepend_size;
-	rec->sg_encrypted_data[0].length -= tls_ctx->tx.prepend_size;
+	/* Skip the first index as it contains AAD data */
+	rec->sg_encrypted_data[1].offset += tls_ctx->tx.prepend_size;
+	rec->sg_encrypted_data[1].length -= tls_ctx->tx.prepend_size;
 
 	aead_request_set_tfm(aead_req, ctx->aead_send);
 	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
-	aead_request_set_crypt(aead_req, rec->sg_aead_in,
-			       rec->sg_aead_out,
+	aead_request_set_crypt(aead_req, rec->sg_plaintext_data,
+			       rec->sg_encrypted_data,
 			       data_len, tls_ctx->tx.iv);
 
 	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
@@ -480,8 +481,8 @@ static int tls_do_encryption(struct sock *sk,
 	rc = crypto_aead_encrypt(aead_req);
 	if (!rc || rc != -EINPROGRESS) {
 		atomic_dec(&ctx->encrypt_pending);
-		rec->sg_encrypted_data[0].offset -= tls_ctx->tx.prepend_size;
-		rec->sg_encrypted_data[0].length += tls_ctx->tx.prepend_size;
+		rec->sg_encrypted_data[1].offset -= tls_ctx->tx.prepend_size;
+		rec->sg_encrypted_data[1].length += tls_ctx->tx.prepend_size;
 	}
 
 	if (!rc) {
@@ -512,16 +513,16 @@ static int tls_push_record(struct sock *sk, int flags,
 	rec->tx_flags = flags;
 	req = &rec->aead_req;
 
-	sg_mark_end(rec->sg_plaintext_data + rec->sg_plaintext_num_elem - 1);
-	sg_mark_end(rec->sg_encrypted_data + rec->sg_encrypted_num_elem - 1);
+	sg_mark_end(rec->sg_plaintext_data + rec->sg_plaintext_num_elem);
+	sg_mark_end(rec->sg_encrypted_data + rec->sg_encrypted_num_elem);
 
 	tls_make_aad(rec->aad_space, rec->sg_plaintext_size,
 		     tls_ctx->tx.rec_seq, tls_ctx->tx.rec_seq_size,
 		     record_type);
 
 	tls_fill_prepend(tls_ctx,
-			 page_address(sg_page(&rec->sg_encrypted_data[0])) +
-			 rec->sg_encrypted_data[0].offset,
+			 page_address(sg_page(&rec->sg_encrypted_data[1])) +
+			 rec->sg_encrypted_data[1].offset,
 			 rec->sg_plaintext_size, record_type);
 
 	tls_ctx->pending_open_record_frags = 0;
@@ -613,7 +614,7 @@ static int memcopy_from_iter(struct sock *sk, struct iov_iter *from,
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
 	struct tls_rec *rec = ctx->open_rec;
-	struct scatterlist *sg = rec->sg_plaintext_data;
+	struct scatterlist *sg = &rec->sg_plaintext_data[1];
 	int copy, i, rc = 0;
 
 	for (i = tls_ctx->pending_open_record_frags;
@@ -659,17 +660,10 @@ struct tls_rec *get_rec(struct sock *sk)
 	sg_init_table(&rec->sg_encrypted_data[0],
 		      ARRAY_SIZE(rec->sg_encrypted_data));
 
-	sg_init_table(rec->sg_aead_in, 2);
-	sg_set_buf(&rec->sg_aead_in[0], rec->aad_space,
+	sg_set_buf(&rec->sg_plaintext_data[0], rec->aad_space,
 		   sizeof(rec->aad_space));
-	sg_unmark_end(&rec->sg_aead_in[1]);
-	sg_chain(rec->sg_aead_in, 2, rec->sg_plaintext_data);
-
-	sg_init_table(rec->sg_aead_out, 2);
-	sg_set_buf(&rec->sg_aead_out[0], rec->aad_space,
+	sg_set_buf(&rec->sg_encrypted_data[0], rec->aad_space,
 		   sizeof(rec->aad_space));
-	sg_unmark_end(&rec->sg_aead_out[1]);
-	sg_chain(rec->sg_aead_out, 2, rec->sg_encrypted_data);
 
 	ctx->open_rec = rec;
 
@@ -763,8 +757,8 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 			ret = zerocopy_from_iter(sk, &msg->msg_iter,
 				try_to_copy, &rec->sg_plaintext_num_elem,
 				&rec->sg_plaintext_size,
-				rec->sg_plaintext_data,
-				ARRAY_SIZE(rec->sg_plaintext_data),
+				&rec->sg_plaintext_data[1],
+				ARRAY_SIZE(rec->sg_plaintext_data) - 1,
 				true);
 			if (ret)
 				goto fallback_to_reg_send;
@@ -781,7 +775,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 			continue;
 
 fallback_to_reg_send:
-			trim_sg(sk, rec->sg_plaintext_data,
+			trim_sg(sk, &rec->sg_plaintext_data[1],
 				&rec->sg_plaintext_num_elem,
 				&rec->sg_plaintext_size,
 				orig_size);
@@ -801,7 +795,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 			try_to_copy -= required_size - rec->sg_plaintext_size;
 			full_record = true;
 
-			trim_sg(sk, rec->sg_encrypted_data,
+			trim_sg(sk, &rec->sg_encrypted_data[1],
 				&rec->sg_encrypted_num_elem,
 				&rec->sg_encrypted_size,
 				rec->sg_plaintext_size +
@@ -949,7 +943,7 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
 		}
 
 		get_page(page);
-		sg = rec->sg_plaintext_data + rec->sg_plaintext_num_elem;
+		sg = &rec->sg_plaintext_data[1] + rec->sg_plaintext_num_elem;
 		sg_set_page(sg, page, copy, offset);
 		sg_unmark_end(sg);
 
@@ -963,7 +957,7 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
 
 		if (full_record || eor ||
 		    rec->sg_plaintext_num_elem ==
-		    ARRAY_SIZE(rec->sg_plaintext_data)) {
+		    ARRAY_SIZE(rec->sg_plaintext_data) - 1) {
 			ret = tls_push_record(sk, flags, record_type);
 			if (ret) {
 				if (ret == -EINPROGRESS)
@@ -1571,7 +1565,7 @@ void tls_sw_free_resources_tx(struct sock *sk)
 		rec = list_first_entry(&ctx->tx_list,
 				       struct tls_rec, list);
 
-		free_sg(sk, rec->sg_plaintext_data,
+		free_sg(sk, &rec->sg_plaintext_data[1],
 			&rec->sg_plaintext_num_elem,
 			&rec->sg_plaintext_size);
 
@@ -1580,11 +1574,11 @@ void tls_sw_free_resources_tx(struct sock *sk)
 	}
 
 	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
-		free_sg(sk, rec->sg_encrypted_data,
+		free_sg(sk, &rec->sg_encrypted_data[1],
 			&rec->sg_encrypted_num_elem,
 			&rec->sg_encrypted_size);
 
-		free_sg(sk, rec->sg_plaintext_data,
+		free_sg(sk, &rec->sg_plaintext_data[1],
 			&rec->sg_plaintext_num_elem,
 			&rec->sg_plaintext_size);
 
-- 
2.13.6
