Message-Id: <20181102182909.320110111@linuxfoundation.org>
Date: Fri, 2 Nov 2018 19:34:05 +0100
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, David Howells <dhowells@...hat.com>,
Sasha Levin <sashal@...nel.org>
Subject: [PATCH 4.18 083/150] rxrpc: Don't check RXRPC_CALL_TX_LAST after calling rxrpc_rotate_tx_window()
4.18-stable review patch. If anyone has any objections, please let me know.
------------------
[ Upstream commit c479d5f2c2e1ce609da08c075054440d97ddff52 ]
We should only call the function to end a call's Tx phase if we rotated the
marked-last packet out of the transmission buffer.
Make rxrpc_rotate_tx_window() return an indication of whether it just
rotated the packet marked as the last out of the transmit buffer, carrying
the information out of the locked section in that function.
We can then check the return value instead of examining RXRPC_CALL_TX_LAST.
Fixes: 70790dbe3f66 ("rxrpc: Pass the last Tx packet marker in the annotation buffer")
Signed-off-by: David Howells <dhowells@...hat.com>
Signed-off-by: Sasha Levin <sashal@...nel.org>
---
net/rxrpc/input.c | 35 +++++++++++++++++++----------------
1 file changed, 19 insertions(+), 16 deletions(-)
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -216,10 +216,11 @@ static void rxrpc_send_ping(struct rxrpc
/*
* Apply a hard ACK by advancing the Tx window.
*/
-static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
+static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
struct rxrpc_ack_summary *summary)
{
struct sk_buff *skb, *list = NULL;
+ bool rot_last = false;
int ix;
u8 annotation;
@@ -243,15 +244,17 @@ static void rxrpc_rotate_tx_window(struc
skb->next = list;
list = skb;
- if (annotation & RXRPC_TX_ANNO_LAST)
+ if (annotation & RXRPC_TX_ANNO_LAST) {
set_bit(RXRPC_CALL_TX_LAST, &call->flags);
+ rot_last = true;
+ }
if ((annotation & RXRPC_TX_ANNO_MASK) != RXRPC_TX_ANNO_ACK)
summary->nr_rot_new_acks++;
}
spin_unlock(&call->lock);
- trace_rxrpc_transmit(call, (test_bit(RXRPC_CALL_TX_LAST, &call->flags) ?
+ trace_rxrpc_transmit(call, (rot_last ?
rxrpc_transmit_rotate_last :
rxrpc_transmit_rotate));
wake_up(&call->waitq);
@@ -262,6 +265,8 @@ static void rxrpc_rotate_tx_window(struc
skb->next = NULL;
rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
}
+
+ return rot_last;
}
/*
@@ -332,11 +337,11 @@ static bool rxrpc_receiving_reply(struct
trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now);
}
- if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags))
- rxrpc_rotate_tx_window(call, top, &summary);
if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
- rxrpc_proto_abort("TXL", call, top);
- return false;
+ if (!rxrpc_rotate_tx_window(call, top, &summary)) {
+ rxrpc_proto_abort("TXL", call, top);
+ return false;
+ }
}
if (!rxrpc_end_tx_phase(call, true, "ETD"))
return false;
@@ -891,8 +896,12 @@ static void rxrpc_input_ack(struct rxrpc
if (nr_acks > call->tx_top - hard_ack)
return rxrpc_proto_abort("AKN", call, 0);
- if (after(hard_ack, call->tx_hard_ack))
- rxrpc_rotate_tx_window(call, hard_ack, &summary);
+ if (after(hard_ack, call->tx_hard_ack)) {
+ if (rxrpc_rotate_tx_window(call, hard_ack, &summary)) {
+ rxrpc_end_tx_phase(call, false, "ETA");
+ return;
+ }
+ }
if (nr_acks > 0) {
if (skb_copy_bits(skb, offset, buf.acks, nr_acks) < 0)
@@ -901,11 +910,6 @@ static void rxrpc_input_ack(struct rxrpc
&summary);
}
- if (test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
- rxrpc_end_tx_phase(call, false, "ETA");
- return;
- }
-
if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] &
RXRPC_TX_ANNO_LAST &&
summary.nr_acks == call->tx_top - hard_ack &&
@@ -927,8 +931,7 @@ static void rxrpc_input_ackall(struct rx
_proto("Rx ACKALL %%%u", sp->hdr.serial);
- rxrpc_rotate_tx_window(call, call->tx_top, &summary);
- if (test_bit(RXRPC_CALL_TX_LAST, &call->flags))
+ if (rxrpc_rotate_tx_window(call, call->tx_top, &summary))
rxrpc_end_tx_phase(call, false, "ETL");
}
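For readers less familiar with the rxrpc code, here is a minimal, self-contained sketch (not the kernel code itself) of the pattern the patch adopts: decide whether the last-marked packet was rotated out while still holding the lock, return that decision as a bool, and have callers branch on the return value instead of re-testing RXRPC_CALL_TX_LAST after the lock has been dropped. All names below (toy_call, toy_rotate, last_queued, etc.) are hypothetical stand-ins used only for illustration.

/*
 * Sketch of "carry the decision out of the locked section":
 * the result is computed under the lock and returned, so callers
 * never need to re-read shared state after the lock is released.
 */
#include <stdbool.h>
#include <stdio.h>
#include <pthread.h>

struct toy_call {
	pthread_mutex_t lock;
	int tx_hard_ack;	/* highest hard-ACK'd sequence so far */
	int tx_top;		/* highest sequence queued for transmission */
	bool last_queued;	/* stands in for the RXRPC_TX_ANNO_LAST annotation */
};

/* Advance the hard-ACK point to 'to'; report whether the last-marked
 * packet was rotated out, decided while the lock was still held. */
static bool toy_rotate(struct toy_call *call, int to)
{
	bool rot_last = false;

	pthread_mutex_lock(&call->lock);
	while (call->tx_hard_ack < to) {
		call->tx_hard_ack++;
		if (call->last_queued && call->tx_hard_ack == call->tx_top)
			rot_last = true;	/* recorded under the lock */
	}
	pthread_mutex_unlock(&call->lock);

	return rot_last;	/* carried out of the locked section */
}

int main(void)
{
	struct toy_call call = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.tx_hard_ack = 0,
		.tx_top = 3,
		.last_queued = true,
	};

	/* Callers branch on the return value rather than re-testing a
	 * flag that could have changed after the lock was dropped. */
	if (toy_rotate(&call, call.tx_top))
		printf("last packet rotated out - end the Tx phase\n");
	else
		printf("Tx phase still in progress\n");

	return 0;
}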