Message-ID: <20180308165013.GA19496@davejwatson-mba>
Date: Thu, 8 Mar 2018 08:50:13 -0800
From: Dave Watson <davejwatson@...com>
To: "David S. Miller" <davem@...emloft.net>,
Tom Herbert <tom@...ntonium.net>,
Alexei Starovoitov <alexei.starovoitov@...il.com>,
<herbert@...dor.apana.org.au>, <linux-crypto@...r.kernel.org>,
<netdev@...r.kernel.org>, <ilyal@...lanox.com>,
<borisp@...lanox.com>
CC: Atul Gupta <atul.gupta@...lsio.com>,
Vakul Garg <vakul.garg@....com>,
Hannes Frederic Sowa <hannes@...essinduktion.org>,
Steffen Klassert <steffen.klassert@...unet.com>,
John Fastabend <john.fastabend@...il.com>,
Daniel Borkmann <daniel@...earbox.net>
Subject: [PATCH RFC 1/5] tls: Generalize zerocopy_from_iter

Refactor zerocopy_from_iter to take the destination scatterlist and the
page/size counters as arguments, so that it can be used for both TX and
RX. The RX path will also support zerocopy directly into the output
iter, as long as the full message can be copied at once (i.e. a large
enough userspace buffer was provided).

Signed-off-by: Dave Watson <davejwatson@...com>
---
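Not part of this patch, but as a minimal sketch of how an RX-side caller
could use the generalized helper once the RX path lands later in the
series (the function name example_rx_zerocopy and the scatterlist sizing
below are illustrative assumptions, not code from this series):

/* Illustrative only: a hypothetical RX-side caller of the generalized
 * helper.  Pins the pages backing the receiver's iovec into a local
 * scatterlist so decryption can write straight into the user buffer.
 */
static int example_rx_zerocopy(struct sock *sk, struct msghdr *msg,
			       int to_copy)
{
	struct scatterlist sgout[MAX_SKB_FRAGS];
	int pages_used = 0;
	unsigned int size_used = 0;

	sg_init_table(sgout, MAX_SKB_FRAGS);

	/* charge == false: the pages belong to the receiver's own
	 * buffer, so no sk_mem_charge() accounting is wanted here.
	 */
	return zerocopy_from_iter(sk, &msg->msg_iter, to_copy,
				  &pages_used, &size_used,
				  sgout, MAX_SKB_FRAGS, false);
}

Passing charge == false matters on RX because the pinned pages are the
receiver's own memory rather than socket send buffer space.
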
net/tls/tls_sw.c | 31 +++++++++++++++++++------------
 1 file changed, 19 insertions(+), 12 deletions(-)

diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index f26376e..d58f675 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -281,23 +281,24 @@ static int tls_sw_push_pending_record(struct sock *sk, int flags)
 }
 
 static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
-			      int length)
+			      int length, int *pages_used,
+			      unsigned int *size_used,
+			      struct scatterlist *to, int to_max_pages,
+			      bool charge)
 {
-	struct tls_context *tls_ctx = tls_get_ctx(sk);
-	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
 	struct page *pages[MAX_SKB_FRAGS];
 
 	size_t offset;
 	ssize_t copied, use;
 	int i = 0;
-	unsigned int size = ctx->sg_plaintext_size;
-	int num_elem = ctx->sg_plaintext_num_elem;
+	unsigned int size = *size_used;
+	int num_elem = *pages_used;
 	int rc = 0;
 	int maxpages;
 
 	while (length > 0) {
 		i = 0;
-		maxpages = ARRAY_SIZE(ctx->sg_plaintext_data) - num_elem;
+		maxpages = to_max_pages - num_elem;
 		if (maxpages == 0) {
 			rc = -EFAULT;
 			goto out;
@@ -317,10 +318,11 @@ static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
 		while (copied) {
 			use = min_t(int, copied, PAGE_SIZE - offset);
 
-			sg_set_page(&ctx->sg_plaintext_data[num_elem],
+			sg_set_page(&to[num_elem],
 				    pages[i], use, offset);
-			sg_unmark_end(&ctx->sg_plaintext_data[num_elem]);
-			sk_mem_charge(sk, use);
+			sg_unmark_end(&to[num_elem]);
+			if (charge)
+				sk_mem_charge(sk, use);
 
 			offset = 0;
 			copied -= use;
@@ -331,8 +333,9 @@ static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
 	}
 
 out:
-	ctx->sg_plaintext_size = size;
-	ctx->sg_plaintext_num_elem = num_elem;
+	*size_used = size;
+	*pages_used = num_elem;
+
 	return rc;
 }
 
@@ -429,7 +432,11 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 
 		if (full_record || eor) {
 			ret = zerocopy_from_iter(sk, &msg->msg_iter,
-						 try_to_copy);
+				try_to_copy, &ctx->sg_plaintext_num_elem,
+				&ctx->sg_plaintext_size,
+				ctx->sg_plaintext_data,
+				ARRAY_SIZE(ctx->sg_plaintext_data),
+				true);
 			if (ret)
 				goto fallback_to_reg_send;
 
--
2.9.5