Message-Id: <20180920.111833.80229845984902983.davem@davemloft.net>
Date: Thu, 20 Sep 2018 11:18:33 -0700 (PDT)
From: David Miller <davem@...emloft.net>
To: vakul.garg@....com
Cc: netdev@...r.kernel.org, borisp@...lanox.com, aviadye@...lanox.com,
davejwatson@...com, doronrk@...com
Subject: Re: [PATCH net-next] net/tls: Add support for async encryption of
records for performance
From: Vakul Garg <vakul.garg@....com>
Date: Wed, 19 Sep 2018 20:51:35 +0530
> This patch enables encryption of multiple records in parallel when an
> async-capable crypto accelerator is present in the system.
This seems to be trading off zero copy against async support.
Async crypto device support is not the common case at all, and
synchronous crypto via CPU crypto acceleration instructions is
much more likely.
Oh I see, the new logic is only triggered when ASYNC_CAPABLE is
set?
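(For reference, the capability check later in the patch reads roughly:

        struct crypto_tfm *tfm = crypto_aead_tfm(ctx->aead_send);
        bool async_capable = tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC;

so the multi-record queueing path appears to be entered only when the
underlying AEAD transform advertises CRYPTO_ALG_ASYNC, and a synchronous
tfm keeps the existing single-record behavior.)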
> +static inline  bool is_tx_ready(struct tls_context *tls_ctx,
> +                                struct tls_sw_context_tx *ctx)
> +{
Two spaces between "inline" and "bool", please make it one.
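i.e.:

        static inline bool is_tx_ready(struct tls_context *tls_ctx,
                                       struct tls_sw_context_tx *ctx)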
> static void tls_write_space(struct sock *sk)
> {
>          struct tls_context *ctx = tls_get_ctx(sk);
> +        struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
Longest to shortest line (reverse christmas tree) ordering for
local variable declarations please.
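For example, since tx_ctx's initializer depends on ctx here, one way to
keep that ordering is to move the initializers into the function body
(just a sketch, not necessarily the final form):

        struct tls_sw_context_tx *tx_ctx;
        struct tls_context *ctx;

        ctx = tls_get_ctx(sk);
        tx_ctx = tls_sw_ctx_tx(ctx);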
>
> +        list_for_each_prev(pos, &ctx->tx_ready_list) {
> +                struct tls_rec *rec = (struct tls_rec *)pos;
> +                u64 seq = be64_to_cpup((const __be64 *)&rec->aad_space);
Likewise.
> -static int tls_do_encryption(struct tls_context *tls_ctx,
> +int tls_tx_records(struct sock *sk, int flags)
> +{
> +        struct tls_rec *rec, *tmp;
> +        struct tls_context *tls_ctx = tls_get_ctx(sk);
> +        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
> +        int rc = 0;
> +        int tx_flags;
Likewise.
> +static void tls_encrypt_done(struct crypto_async_request *req, int err)
> +{
> +        struct aead_request *aead_req = (struct aead_request *)req;
> +        struct sock *sk = req->data;
> +        struct tls_context *tls_ctx = tls_get_ctx(sk);
> +        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
> +        struct tls_rec *rec;
> +        int pending;
> +        bool ready = false;
Likewise.
> +static int tls_do_encryption(struct sock *sk,
> +                             struct tls_context *tls_ctx,
>                               struct tls_sw_context_tx *ctx,
>                               struct aead_request *aead_req,
>                               size_t data_len)
>  {
>          int rc;
> +        struct tls_rec *rec = ctx->open_rec;
Likewise.
> @@ -473,11 +630,12 @@ static int memcopy_from_iter(struct sock *sk, struct iov_iter *from,
>  {
>          struct tls_context *tls_ctx = tls_get_ctx(sk);
>          struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
> -        struct scatterlist *sg = ctx->sg_plaintext_data;
> +        struct tls_rec *rec = ctx->open_rec;
> +        struct scatterlist *sg = rec->sg_plaintext_data;
>          int copy, i, rc = 0;
Likewise.
> +struct tls_rec *get_rec(struct sock *sk)
> +{
> +        int mem_size;
> +        struct tls_context *tls_ctx = tls_get_ctx(sk);
> +        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
> +        struct tls_rec *rec;
Likewise.
> @@ -510,21 +707,33 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
>          int record_room;
>          bool full_record;
>          int orig_size;
> +        struct tls_rec *rec;
>          bool is_kvec = msg->msg_iter.type & ITER_KVEC;
> +        struct crypto_tfm *tfm = crypto_aead_tfm(ctx->aead_send);
> +        bool async_capable = tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC;
> +        int num_async = 0;
> +        int num_zc = 0;
Likewise.
> @@ -661,6 +904,8 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
>          struct scatterlist *sg;
>          bool full_record;
>          int record_room;
> +        struct tls_rec *rec;
> +        int num_async = 0;
Likewise.