[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20250512012748.79749-8-damien.riegel@silabs.com>
Date: Sun, 11 May 2025 21:27:40 -0400
From: Damien Riégel <damien.riegel@...abs.com>
To: Andrew Lunn <andrew+netdev@...n.ch>,
"David S . Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>, Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>, Rob Herring <robh@...nel.org>,
Krzysztof Kozlowski <krzk+dt@...nel.org>,
Conor Dooley <conor+dt@...nel.org>,
Silicon Labs Kernel Team <linux-devel@...abs.com>,
netdev@...r.kernel.org, devicetree@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: [RFC net-next 07/15] net: cpc: implement sequencing and ack
CPC frames are sequenced and must be acked by the remote. If not acked
in a timely manner, they should be retransmitted, but that feature is
not part of this commit.
Another key feature is that peers advertise how many frames they can
receive. As the remote is usually a microcontroller with limited memory,
this serves as a way to throttle the host and prevent it from sending
frames that the microcontroller is not yet able to receive. This is
where the endpoint's holding_queue becomes useful: it serves as storage
for frames that the endpoint is ready to send but that the remote is not
yet able to receive.
Signed-off-by: Damien Riégel <damien.riegel@...abs.com>
---
drivers/net/cpc/cpc.h | 26 +++++++++
drivers/net/cpc/endpoint.c | 24 ++++++++-
drivers/net/cpc/protocol.c | 108 ++++++++++++++++++++++++++++++++++++-
3 files changed, 156 insertions(+), 2 deletions(-)
diff --git a/drivers/net/cpc/cpc.h b/drivers/net/cpc/cpc.h
index dc05b36b6e6..94284e2d59d 100644
--- a/drivers/net/cpc/cpc.h
+++ b/drivers/net/cpc/cpc.h
@@ -18,6 +18,27 @@ struct cpc_endpoint;
extern const struct bus_type cpc_bus;
+/**
+ * struct cpc_endpoint_tcb - endpoint's transmission control block
+ * @lock: synchronize tcb access
+ * @send_wnd: send window, maximum number of frames that the remote can accept
+ * TX frames should have a sequence in the range
+ * [send_una; send_una + send_wnd].
+ * @send_nxt: send next, the next sequence number that will be used for transmission
+ * @send_una: send unacknowledged, the oldest unacknowledged sequence number
+ * @ack: current acknowledge number
+ * @seq: current sequence number
+ */
+struct cpc_endpoint_tcb {
+ struct mutex lock; /* Synchronize access to all other attributes. */
+ u8 send_wnd;
+ u8 send_nxt;
+ u8 send_una;
+ u8 ack;
+ u8 seq;
+};
+
/** struct cpc_endpoint_ops - Endpoint's callbacks.
* @rx: Data availability is provided with a skb owned by the driver.
*/
@@ -32,6 +53,8 @@ struct cpc_endpoint_ops {
* @id: Endpoint id, uniquely identifies an endpoint within a CPC device.
* @intf: Pointer to CPC device this endpoint belongs to.
* @list_node: list_head member for linking in a CPC device.
+ * @tcb: Transmission control block.
+ * @pending_ack_queue: Contain frames pending on an acknowledge.
* @holding_queue: Contains frames that were not pushed to the transport layer
* due to having insufficient space in the transmit window.
*
@@ -48,6 +71,9 @@ struct cpc_endpoint {
struct list_head list_node;
struct cpc_endpoint_ops *ops;
+ struct cpc_endpoint_tcb tcb;
+
+ struct sk_buff_head pending_ack_queue;
struct sk_buff_head holding_queue;
};
diff --git a/drivers/net/cpc/endpoint.c b/drivers/net/cpc/endpoint.c
index 51007ba5bcc..db925cc078d 100644
--- a/drivers/net/cpc/endpoint.c
+++ b/drivers/net/cpc/endpoint.c
@@ -20,12 +20,26 @@ static void cpc_ep_release(struct device *dev)
{
struct cpc_endpoint *ep = cpc_endpoint_from_dev(dev);
+ skb_queue_purge(&ep->pending_ack_queue);
skb_queue_purge(&ep->holding_queue);
cpc_interface_put(ep->intf);
kfree(ep);
}
+/**
+ * cpc_endpoint_tcb_reset() - Reset endpoint's TCB to initial values.
+ * @ep: endpoint pointer
+ */
+static void cpc_endpoint_tcb_reset(struct cpc_endpoint *ep)
+{
+ ep->tcb.seq = ep->id;
+ ep->tcb.ack = 0;
+ ep->tcb.send_nxt = ep->id;
+ ep->tcb.send_una = ep->id;
+ ep->tcb.send_wnd = 1;
+}
+
/**
* cpc_endpoint_alloc() - Allocate memory for new CPC endpoint.
* @intf: CPC interface owning this endpoint.
@@ -55,6 +69,10 @@ struct cpc_endpoint *cpc_endpoint_alloc(struct cpc_interface *intf, u8 id)
ep->dev.bus = &cpc_bus;
ep->dev.release = cpc_ep_release;
+ mutex_init(&ep->tcb.lock);
+ cpc_endpoint_tcb_reset(ep);
+
+ skb_queue_head_init(&ep->pending_ack_queue);
skb_queue_head_init(&ep->holding_queue);
device_initialize(&ep->dev);
@@ -195,6 +213,8 @@ int cpc_endpoint_write(struct cpc_endpoint *ep, struct sk_buff *skb)
struct cpc_header hdr;
int err;
+ mutex_lock(&ep->tcb.lock);
+
if (ep->intf->ops->csum)
ep->intf->ops->csum(skb);
@@ -202,10 +222,12 @@ int cpc_endpoint_write(struct cpc_endpoint *ep, struct sk_buff *skb)
hdr.ctrl = cpc_header_get_ctrl(CPC_FRAME_TYPE_DATA, true);
hdr.ep_id = ep->id;
hdr.recv_wnd = CPC_HEADER_MAX_RX_WINDOW;
- hdr.seq = 0;
+ hdr.seq = ep->tcb.seq;
hdr.dat.payload_len = skb->len;
err = __cpc_protocol_write(ep, &hdr, skb);
+ mutex_unlock(&ep->tcb.lock);
+
return err;
}
diff --git a/drivers/net/cpc/protocol.c b/drivers/net/cpc/protocol.c
index 91335160981..92e3b0a9cdf 100644
--- a/drivers/net/cpc/protocol.c
+++ b/drivers/net/cpc/protocol.c
@@ -11,15 +11,54 @@
#include "interface.h"
#include "protocol.h"
+static void __cpc_protocol_send_ack(struct cpc_endpoint *ep)
+{
+ struct cpc_header hdr;
+ struct sk_buff *skb;
+
+ skb = cpc_skb_alloc(0, GFP_KERNEL);
+ if (!skb)
+ return;
+
+ memset(&hdr, 0, sizeof(hdr));
+ hdr.ctrl = cpc_header_get_ctrl(CPC_FRAME_TYPE_DATA, false);
+ hdr.ep_id = ep->id;
+ hdr.recv_wnd = CPC_HEADER_MAX_RX_WINDOW;
+ hdr.ack = ep->tcb.ack;
+ memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
+
+ cpc_interface_send_frame(ep->intf, skb);
+}
+
+static void cpc_protocol_on_tx_complete(struct sk_buff *skb)
+{
+ struct cpc_endpoint *ep = cpc_skb_get_ctx(skb);
+
+ /*
+ * Increase the send_nxt sequence, this is used as the upper bound of sequence number that
+ * can be ACK'd by the remote.
+ */
+ mutex_lock(&ep->tcb.lock);
+ ep->tcb.send_nxt++;
+ mutex_unlock(&ep->tcb.lock);
+}
+
static int __cpc_protocol_queue_tx_frame(struct cpc_endpoint *ep, struct sk_buff *skb)
{
+ struct cpc_header *hdr = (struct cpc_header *)skb->data;
struct cpc_interface *intf = ep->intf;
struct sk_buff *cloned_skb;
+ hdr->ack = ep->tcb.ack;
+
cloned_skb = skb_clone(skb, GFP_KERNEL);
if (!cloned_skb)
return -ENOMEM;
+ skb_queue_tail(&ep->pending_ack_queue, skb);
+
+ cpc_skb_set_ctx(cloned_skb, cpc_protocol_on_tx_complete, ep);
+
cpc_interface_send_frame(intf, cloned_skb);
return 0;
@@ -28,10 +67,19 @@ static int __cpc_protocol_queue_tx_frame(struct cpc_endpoint *ep, struct sk_buff
static void __cpc_protocol_process_pending_tx_frames(struct cpc_endpoint *ep)
{
struct sk_buff *skb;
+ u8 window;
int err;
+ window = ep->tcb.send_wnd;
+
while ((skb = skb_dequeue(&ep->holding_queue))) {
- err = __cpc_protocol_queue_tx_frame(ep, skb);
+ if (!cpc_header_number_in_window(ep->tcb.send_una,
+ window,
+ cpc_header_get_seq(skb->data)))
+ err = -ERANGE;
+ else
+ err = __cpc_protocol_queue_tx_frame(ep, skb);
+
if (err < 0) {
skb_queue_head(&ep->holding_queue, skb);
return;
@@ -39,8 +87,64 @@ static void __cpc_protocol_process_pending_tx_frames(struct cpc_endpoint *ep)
}
}
+static void __cpc_protocol_receive_ack(struct cpc_endpoint *ep, u8 recv_wnd, u8 ack)
+{
+ struct sk_buff *skb;
+ u8 acked_frames;
+
+ ep->tcb.send_wnd = recv_wnd;
+
+ skb = skb_peek(&ep->pending_ack_queue);
+ if (!skb)
+ goto out;
+
+ /* Return if no frame to ACK. */
+ if (!cpc_header_number_in_range(ep->tcb.send_una, ep->tcb.send_nxt, ack))
+ goto out;
+
+ /* Calculate how many frames will be ACK'd. */
+ acked_frames = cpc_header_get_frames_acked_count(cpc_header_get_seq(skb->data),
+ ack,
+ skb_queue_len(&ep->pending_ack_queue));
+
+ for (u8 i = 0; i < acked_frames; i++)
+ kfree_skb(skb_dequeue(&ep->pending_ack_queue));
+
+ ep->tcb.send_una += acked_frames;
+
+out:
+ __cpc_protocol_process_pending_tx_frames(ep);
+}
+
void cpc_protocol_on_data(struct cpc_endpoint *ep, struct sk_buff *skb)
{
+ bool expected_seq;
+
+ mutex_lock(&ep->tcb.lock);
+
+ __cpc_protocol_receive_ack(ep,
+ cpc_header_get_recv_wnd(skb->data),
+ cpc_header_get_ack(skb->data));
+
+ if (cpc_header_get_req_ack(skb->data)) {
+ expected_seq = cpc_header_get_seq(skb->data) == ep->tcb.ack;
+ if (expected_seq)
+ ep->tcb.ack++;
+
+ __cpc_protocol_send_ack(ep);
+
+ if (!expected_seq) {
+ dev_warn(&ep->dev,
+ "unexpected seq: %u, expected seq: %u\n",
+ cpc_header_get_seq(skb->data), ep->tcb.ack);
+ mutex_unlock(&ep->tcb.lock);
+ kfree_skb(skb);
+ return;
+ }
+ }
+
+ mutex_unlock(&ep->tcb.lock);
+
if (skb->len > CPC_HEADER_SIZE) {
/* Strip header. */
skb_pull(skb, CPC_HEADER_SIZE);
@@ -74,5 +178,7 @@ int __cpc_protocol_write(struct cpc_endpoint *ep,
__cpc_protocol_process_pending_tx_frames(ep);
+ ep->tcb.seq++;
+
return 0;
}
--
2.49.0
Powered by blists - more mailing lists