[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <SA0PR15MB39192B3D3A84BE3154B20124994C9@SA0PR15MB3919.namprd15.prod.outlook.com>
Date: Tue, 20 Sep 2022 08:18:29 +0000
From: Bernard Metzler <BMT@...ich.ibm.com>
To: "linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>
Subject: RE: [PATCH] RDMA/siw: Always consume all skbuf data in
sk_data_ready() upcall.
sorry, please ignore this patch to @linux-kernel. It was intended
for @linux-rdma mailing list. I'll resend there.
Sorry again,
Bernard.
> -----Original Message-----
> From: Bernard Metzler <bmt@...ich.ibm.com>
> Sent: Wednesday, 14 September 2022 17:21
> To: linux-kernel@...r.kernel.org
> Cc: jgg@...dia.com; leonro@...dia.com; Bernard Metzler
> <BMT@...ich.ibm.com>; Olga Kornievskaia <kolga@...app.com>
> Subject: [PATCH] RDMA/siw: Always consume all skbuf data in
> sk_data_ready() upcall.
>
> For header and trailer/padding processing, siw did not consume new
> skb data until a minimum amount was present to fill the current header
> or trailer structure, including potential payload padding. Not consuming
> any data during the upcall may cause a receive stall, since
> tcp_read_sock() does not upcall again if no new data arrives.
> An NFSoRDMA client got stuck at RDMA Write reception of unaligned
> payload if the current skb contained only the expected 3 padding
> bytes, but not the 4-byte CRC trailer. The expected 4 further bytes had
> already arrived in another skb; not consuming those 3 padding bytes in
> the current upcall left the Write incomplete, waiting for the CRC forever.
>
> Fixes: 8b6a361b8c48 ("rdma/siw: receive path")
>
> Reported-by: Olga Kornievskaia <kolga@...app.com>
> Tested-by: Olga Kornievskaia <kolga@...app.com>
> Signed-off-by: Bernard Metzler <bmt@...ich.ibm.com>
> ---
> drivers/infiniband/sw/siw/siw_qp_rx.c | 27 +++++++++++++++------------
> 1 file changed, 15 insertions(+), 12 deletions(-)
>
> diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c
> b/drivers/infiniband/sw/siw/siw_qp_rx.c
> index 875ea6f1b04a..fd721cc19682 100644
> --- a/drivers/infiniband/sw/siw/siw_qp_rx.c
> +++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
> @@ -961,27 +961,28 @@ int siw_proc_terminate(struct siw_qp *qp)
> static int siw_get_trailer(struct siw_qp *qp, struct siw_rx_stream
> *srx)
> {
> struct sk_buff *skb = srx->skb;
> + int avail = min(srx->skb_new, srx->fpdu_part_rem);
> u8 *tbuf = (u8 *)&srx->trailer.crc - srx->pad;
> __wsum crc_in, crc_own = 0;
>
> siw_dbg_qp(qp, "expected %d, available %d, pad %u\n",
> srx->fpdu_part_rem, srx->skb_new, srx->pad);
>
> - if (srx->skb_new < srx->fpdu_part_rem)
> - return -EAGAIN;
> -
> - skb_copy_bits(skb, srx->skb_offset, tbuf, srx->fpdu_part_rem);
> + skb_copy_bits(skb, srx->skb_offset, tbuf, avail);
>
> - if (srx->mpa_crc_hd && srx->pad)
> - crypto_shash_update(srx->mpa_crc_hd, tbuf, srx->pad);
> + srx->skb_new -= avail;
> + srx->skb_offset += avail;
> + srx->skb_copied += avail;
> + srx->fpdu_part_rem -= avail;
>
> - srx->skb_new -= srx->fpdu_part_rem;
> - srx->skb_offset += srx->fpdu_part_rem;
> - srx->skb_copied += srx->fpdu_part_rem;
> + if (srx->fpdu_part_rem)
> + return -EAGAIN;
>
> if (!srx->mpa_crc_hd)
> return 0;
>
> + if (srx->pad)
> + crypto_shash_update(srx->mpa_crc_hd, tbuf, srx->pad);
> /*
> * CRC32 is computed, transmitted and received directly in NBO,
> * so there's never a reason to convert byte order.
> @@ -1083,10 +1084,9 @@ static int siw_get_hdr(struct siw_rx_stream *srx)
> * completely received.
> */
> if (iwarp_pktinfo[opcode].hdr_len > sizeof(struct
> iwarp_ctrl_tagged)) {
> - bytes = iwarp_pktinfo[opcode].hdr_len - MIN_DDP_HDR;
> + int hdrlen = iwarp_pktinfo[opcode].hdr_len;
>
> - if (srx->skb_new < bytes)
> - return -EAGAIN;
> + bytes = min_t(int, hdrlen - MIN_DDP_HDR, srx->skb_new);
>
> skb_copy_bits(skb, srx->skb_offset,
> (char *)c_hdr + srx->fpdu_part_rcvd, bytes);
> @@ -1096,6 +1096,9 @@ static int siw_get_hdr(struct siw_rx_stream *srx)
> srx->skb_new -= bytes;
> srx->skb_offset += bytes;
> srx->skb_copied += bytes;
> +
> + if (srx->fpdu_part_rcvd < hdrlen)
> + return -EAGAIN;
> }
>
> /*
> --
> 2.32.0
Powered by blists - more mailing lists