Message-ID: <20171121075125.GA6539@kroah.com>
Date: Tue, 21 Nov 2017 08:51:25 +0100
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: stable@...r.kernel.org,
Raveendra Padasalagi <raveendra.padasalagi@...adcom.com>,
Anup Patel <anup.patel@...adcom.com>,
Scott Branden <scott.branden@...adcom.com>,
Herbert Xu <herbert@...dor.apana.org.au>
Subject: Re: [PATCH 4.13 07/28] crypto: brcm - Explicity ACK mailbox message

Nope, this patch breaks the build as it relies on a previous patch that
is not in 4.13-stable, so I'm dropping it. If anyone wants it there,
well, it really feels like it shouldn't be included in a stable tree
anyway...
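
For context, the mechanism this patch switches to is the mailbox
framework's client-driven TX acknowledgement.  Roughly, a client that
ACKs its own transfers looks like this (a simplified sketch with
made-up names, not the SPU driver code itself):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/mailbox_client.h>

static struct mbox_chan *example_chan;	/* hypothetical channel */

static int example_client_init(struct device *dev)
{
	static struct mbox_client cl;

	cl.dev = dev;
	cl.tx_block = false;
	cl.tx_tout = 0;
	/* client promises to call mbox_client_txdone() itself */
	cl.knows_txdone = true;

	example_chan = mbox_request_channel(&cl, 0);
	return PTR_ERR_OR_ZERO(example_chan);
}

static int example_send(void *msg)
{
	int err = mbox_send_message(example_chan, msg);

	if (err < 0)
		return err;

	/* ... inspect the controller-filled status in *msg here ... */

	/* tell the core the transfer is done so the queue can advance */
	mbox_client_txdone(example_chan, 0);
	return 0;
}

The last hunk below flips mcl->knows_txdone to true to match, which is
why the rest of the series has to be there too.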

thanks,

greg k-h

On Sun, Nov 19, 2017 at 03:43:54PM +0100, Greg Kroah-Hartman wrote:
> 4.13-stable review patch. If anyone has any objections, please let me know.
>
> ------------------
>
> From: raveendra padasalagi <raveendra.padasalagi@...adcom.com>
>
> commit f0e2ce58f853634d7ad1a418a49bd5fbd556227c upstream.
>
> Add support to explicitly ACK a mailbox message, because after sending
> the message we can learn the send status via the error attribute of
> brcm_message.
>
> This is needed to support the "txdone_ack" method provided by the
> mailbox controller driver.
>
> Fixes: 9d12ba86f818 ("crypto: brcm - Add Broadcom SPU driver")
> Signed-off-by: Raveendra Padasalagi <raveendra.padasalagi@...adcom.com>
> Reviewed-by: Anup Patel <anup.patel@...adcom.com>
> Reviewed-by: Scott Branden <scott.branden@...adcom.com>
> Signed-off-by: Herbert Xu <herbert@...dor.apana.org.au>
> Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
>
> ---
> drivers/crypto/bcm/cipher.c | 101 ++++++++++++++++++++------------------------
> 1 file changed, 46 insertions(+), 55 deletions(-)
>
> --- a/drivers/crypto/bcm/cipher.c
> +++ b/drivers/crypto/bcm/cipher.c
> @@ -258,6 +258,44 @@ spu_ablkcipher_tx_sg_create(struct brcm_
> return 0;
> }
>
> +static int mailbox_send_message(struct brcm_message *mssg, u32 flags,
> + u8 chan_idx)
> +{
> + int err;
> + int retry_cnt = 0;
> + struct device *dev = &(iproc_priv.pdev->dev);
> +
> + err = mbox_send_message(iproc_priv.mbox[chan_idx], mssg);
> + if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
> + while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
> + /*
> + * Mailbox queue is full. Since MAY_SLEEP is set, assume
> + * not in atomic context and we can wait and try again.
> + */
> + retry_cnt++;
> + usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
> + err = mbox_send_message(iproc_priv.mbox[chan_idx],
> + mssg);
> + atomic_inc(&iproc_priv.mb_no_spc);
> + }
> + }
> + if (err < 0) {
> + atomic_inc(&iproc_priv.mb_send_fail);
> + return err;
> + }
> +
> + /* Check error returned by mailbox controller */
> + err = mssg->error;
> + if (unlikely(err < 0)) {
> + dev_err(dev, "message error %d", err);
> + /* Signal txdone for mailbox channel */
> + }
> +
> + /* Signal txdone for mailbox channel */
> + mbox_client_txdone(iproc_priv.mbox[chan_idx], err);
> + return err;
> +}
> +
> /**
> * handle_ablkcipher_req() - Submit as much of a block cipher request as fits in
> * a single SPU request message, starting at the current position in the request
> @@ -295,7 +333,6 @@ static int handle_ablkcipher_req(struct
> u32 pad_len; /* total length of all padding */
> bool update_key = false;
> struct brcm_message *mssg; /* mailbox message */
> - int retry_cnt = 0;
>
> /* number of entries in src and dst sg in mailbox message. */
> u8 rx_frag_num = 2; /* response header and STATUS */
> @@ -464,24 +501,9 @@ static int handle_ablkcipher_req(struct
> if (err)
> return err;
>
> - err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg);
> - if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
> - while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
> - /*
> - * Mailbox queue is full. Since MAY_SLEEP is set, assume
> - * not in atomic context and we can wait and try again.
> - */
> - retry_cnt++;
> - usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
> - err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx],
> - mssg);
> - atomic_inc(&iproc_priv.mb_no_spc);
> - }
> - }
> - if (unlikely(err < 0)) {
> - atomic_inc(&iproc_priv.mb_send_fail);
> + err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
> + if (unlikely(err < 0))
> return err;
> - }
>
> return -EINPROGRESS;
> }
> @@ -712,7 +734,6 @@ static int handle_ahash_req(struct iproc
> u32 spu_hdr_len;
> unsigned int digestsize;
> u16 rem = 0;
> - int retry_cnt = 0;
>
> /*
> * number of entries in src and dst sg. Always includes SPU msg header.
> @@ -906,24 +927,10 @@ static int handle_ahash_req(struct iproc
> if (err)
> return err;
>
> - err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg);
> - if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
> - while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
> - /*
> - * Mailbox queue is full. Since MAY_SLEEP is set, assume
> - * not in atomic context and we can wait and try again.
> - */
> - retry_cnt++;
> - usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
> - err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx],
> - mssg);
> - atomic_inc(&iproc_priv.mb_no_spc);
> - }
> - }
> - if (err < 0) {
> - atomic_inc(&iproc_priv.mb_send_fail);
> + err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
> + if (unlikely(err < 0))
> return err;
> - }
> +
> return -EINPROGRESS;
> }
>
> @@ -1322,7 +1329,6 @@ static int handle_aead_req(struct iproc_
> int assoc_nents = 0;
> bool incl_icv = false;
> unsigned int digestsize = ctx->digestsize;
> - int retry_cnt = 0;
>
> /* number of entries in src and dst sg. Always includes SPU msg header.
> */
> @@ -1560,24 +1566,9 @@ static int handle_aead_req(struct iproc_
> if (err)
> return err;
>
> - err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg);
> - if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
> - while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
> - /*
> - * Mailbox queue is full. Since MAY_SLEEP is set, assume
> - * not in atomic context and we can wait and try again.
> - */
> - retry_cnt++;
> - usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
> - err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx],
> - mssg);
> - atomic_inc(&iproc_priv.mb_no_spc);
> - }
> - }
> - if (err < 0) {
> - atomic_inc(&iproc_priv.mb_send_fail);
> + err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
> + if (unlikely(err < 0))
> return err;
> - }
>
> return -EINPROGRESS;
> }
> @@ -4534,7 +4525,7 @@ static int spu_mb_init(struct device *de
> mcl->dev = dev;
> mcl->tx_block = false;
> mcl->tx_tout = 0;
> - mcl->knows_txdone = false;
> + mcl->knows_txdone = true;
> mcl->rx_callback = spu_rx_callback;
> mcl->tx_done = NULL;
>
>