lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20251114150738.32426-10-damien.riegel@silabs.com>
Date: Fri, 14 Nov 2025 10:07:35 -0500
From: Damien Riégel <damien.riegel@...abs.com>
To: greybus-dev@...ts.linaro.org, Johan Hovold <johan@...nel.org>,
        Alex Elder <elder@...nel.org>,
        Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
        linux-kernel@...r.kernel.org
Cc: Silicon Labs Kernel Team <linux-devel@...abs.com>,
        Damien Riégel <damien.riegel@...abs.com>
Subject: [RFC PATCH v2 09/12] greybus: cpc: acknowledge all incoming messages

Currently, CPC doesn't send messages on its own, it only prepends its
header to outgoing messages. This can lead to messages not being
acknowledged, for instance in the case of an SVC Ping:

	Host				Device

  SVC Ping (seq=X, ack=Y)
				  SVC Ping Reply (seq=Y, ack=X+1)

The "Ping Reply" is never acknowledged at the CPC level, which can lead
to retransmissions, or worse, the device might think the link is broken
and do something to recover.

To prevent that scenario, an ack mechanism is implemented in the most
straightforward manner: send an ACK to all incoming messages. Here, two
flags need to be added:
 - First, an ACK frame should not be passed to the Greybus layer, so a
   "CONTROL" flag is added. If this flag is set, it means it's a control
   message and should stay at the CPC level. Currently there is only
   one type of control frame, the standalone ack. Control messages have
   the same format as Greybus operations.
 - Second, acks themselves should not be acked, so to determine if a
   message should be acked or not, a REQUEST_ACK flag is added.

Signed-off-by: Damien Riégel <damien.riegel@...abs.com>
---
 drivers/greybus/cpc/cpc.h      |  3 ++
 drivers/greybus/cpc/cport.c    |  1 +
 drivers/greybus/cpc/header.c   | 41 ++++++++++++++++++++++++
 drivers/greybus/cpc/header.h   |  3 ++
 drivers/greybus/cpc/protocol.c | 57 ++++++++++++++++++++++++++++------
 5 files changed, 96 insertions(+), 9 deletions(-)

diff --git a/drivers/greybus/cpc/cpc.h b/drivers/greybus/cpc/cpc.h
index db760cf8b32..bec2580e358 100644
--- a/drivers/greybus/cpc/cpc.h
+++ b/drivers/greybus/cpc/cpc.h
@@ -51,6 +51,9 @@ struct cpc_skb_cb {
 	struct gb_message *gb_message;
 
 	u8 seq;
+
+#define CPC_SKB_FLAG_REQ_ACK	(1 << 0)
+	u8 cpc_flags;
 };
 
 #define CPC_SKB_CB(__skb)	((struct cpc_skb_cb *)&((__skb)->cb[0]))
diff --git a/drivers/greybus/cpc/cport.c b/drivers/greybus/cpc/cport.c
index 2ee0b129996..35a148abbed 100644
--- a/drivers/greybus/cpc/cport.c
+++ b/drivers/greybus/cpc/cport.c
@@ -86,6 +86,7 @@ int cpc_cport_transmit(struct cpc_cport *cport, struct sk_buff *skb)
 	mutex_lock(&cport->lock);
 
 	CPC_SKB_CB(skb)->seq = cport->tcb.seq;
+	CPC_SKB_CB(skb)->cpc_flags = CPC_SKB_FLAG_REQ_ACK;
 
 	cport->tcb.seq++;
 	ack = cport->tcb.ack;
diff --git a/drivers/greybus/cpc/header.c b/drivers/greybus/cpc/header.c
index 62946d6077e..973883c1d5c 100644
--- a/drivers/greybus/cpc/header.c
+++ b/drivers/greybus/cpc/header.c
@@ -3,8 +3,25 @@
  * Copyright (c) 2025, Silicon Laboratories, Inc.
  */
 
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+
 #include "header.h"
 
+#define CPC_HEADER_CONTROL_IS_CONTROL_MASK	BIT(7)
+#define CPC_HEADER_CONTROL_REQ_ACK_MASK		BIT(6)
+
+/**
+ * cpc_header_is_control() - Identify if this is a control frame.
+ * @hdr: CPC header.
+ *
+ * Return: True if this is a control frame, false if this is a Greybus frame.
+ */
+bool cpc_header_is_control(const struct cpc_header *hdr)
+{
+	return hdr->ctrl_flags & CPC_HEADER_CONTROL_IS_CONTROL_MASK;
+}
+
 /**
  * cpc_header_get_seq() - Get the sequence number.
  * @hdr: CPC header.
@@ -15,3 +32,27 @@ u8 cpc_header_get_seq(const struct cpc_header *hdr)
 {
 	return hdr->seq;
 }
+
+/**
+ * cpc_header_get_req_ack() - Get the request acknowledge frame flag.
+ * @hdr: CPC header.
+ *
+ * Return: Request acknowledge frame flag.
+ */
+bool cpc_header_get_req_ack(const struct cpc_header *hdr)
+{
+	return FIELD_GET(CPC_HEADER_CONTROL_REQ_ACK_MASK, hdr->ctrl_flags);
+}
+
+/**
+ * cpc_header_encode_ctrl_flags() - Encode parameters into the control byte.
+ * @control: True if CPC control frame, false if Greybus frame.
+ * @req_ack: Frame flag indicating a request to be acknowledged.
+ *
+ * Return: Encoded control byte.
+ */
+u8 cpc_header_encode_ctrl_flags(bool control, bool req_ack)
+{
+	return FIELD_PREP(CPC_HEADER_CONTROL_IS_CONTROL_MASK, control) |
+	       FIELD_PREP(CPC_HEADER_CONTROL_REQ_ACK_MASK, req_ack);
+}
diff --git a/drivers/greybus/cpc/header.h b/drivers/greybus/cpc/header.h
index d46ad8e53fe..5ac431b3c54 100644
--- a/drivers/greybus/cpc/header.h
+++ b/drivers/greybus/cpc/header.h
@@ -40,6 +40,9 @@ struct cpc_header {
 
 #define CPC_HEADER_SIZE			(sizeof(struct cpc_header))
 
+bool cpc_header_is_control(const struct cpc_header *hdr);
 u8 cpc_header_get_seq(const struct cpc_header *hdr);
+bool cpc_header_get_req_ack(const struct cpc_header *hdr);
+u8 cpc_header_encode_ctrl_flags(bool control, bool req_ack);
 
 #endif
diff --git a/drivers/greybus/cpc/protocol.c b/drivers/greybus/cpc/protocol.c
index 5684557df64..63b4127fcea 100644
--- a/drivers/greybus/cpc/protocol.c
+++ b/drivers/greybus/cpc/protocol.c
@@ -10,6 +10,11 @@
 #include "host.h"
 
 
+static bool cpc_skb_is_sequenced(struct sk_buff *skb)
+{
+	return CPC_SKB_CB(skb)->cpc_flags & CPC_SKB_FLAG_REQ_ACK;
+}
+
 void cpc_protocol_prepare_header(struct sk_buff *skb, u8 ack)
 {
 	struct cpc_header *hdr;
@@ -19,29 +24,63 @@ void cpc_protocol_prepare_header(struct sk_buff *skb, u8 ack)
 	hdr = (struct cpc_header *)skb->data;
 	hdr->ack = ack;
 	hdr->recv_wnd = 0;
-	hdr->ctrl_flags = 0;
 	hdr->seq = CPC_SKB_CB(skb)->seq;
+	hdr->ctrl_flags = cpc_header_encode_ctrl_flags(!CPC_SKB_CB(skb)->gb_message,
+						       cpc_skb_is_sequenced(skb));
+}
+
+static void cpc_protocol_queue_ack(struct cpc_cport *cport, u8 ack)
+{
+	struct gb_operation_msg_hdr *gb_hdr;
+	struct sk_buff *skb;
+
+	skb = alloc_skb(CPC_HEADER_SIZE + sizeof(*gb_hdr), GFP_KERNEL);
+	if (!skb)
+		return;
+
+	skb_reserve(skb, CPC_HEADER_SIZE);
+
+	gb_hdr = skb_put(skb, sizeof(*gb_hdr));
+	memset(gb_hdr, 0, sizeof(*gb_hdr));
+
+	/* In the CPC Operation Header, only the size and cport_id matter for ACKs. */
+	gb_hdr->size = sizeof(*gb_hdr);
+	cpc_cport_pack(gb_hdr, cport->id);
+
+	cpc_protocol_prepare_header(skb, ack);
+
+	cpc_hd_send_skb(cport->cpc_hd, skb);
 }
 
 void cpc_protocol_on_data(struct cpc_cport *cport, struct sk_buff *skb)
 {
 	struct cpc_header *cpc_hdr = (struct cpc_header *)skb->data;
+	bool require_ack = cpc_header_get_req_ack(cpc_hdr);
 	u8 seq = cpc_header_get_seq(cpc_hdr);
 	bool expected_seq = false;
+	u8 ack;
 
 	mutex_lock(&cport->lock);
 
-	expected_seq = seq == cport->tcb.ack;
-	if (expected_seq)
-		cport->tcb.ack++;
-	else
-		dev_warn(cpc_hd_dev(cport->cpc_hd),
-			 "unexpected seq: %u, expected seq: %u\n",
-			 seq, cport->tcb.ack);
+	if (require_ack) {
+		expected_seq = seq == cport->tcb.ack;
+		if (expected_seq)
+			cport->tcb.ack++;
+		else
+			dev_warn(cpc_hd_dev(cport->cpc_hd),
+				 "unexpected seq: %u, expected seq: %u\n",
+				 seq, cport->tcb.ack);
+	}
+
+	ack = cport->tcb.ack;
 
 	mutex_unlock(&cport->lock);
 
-	if (expected_seq) {
+	/* Ack no matter if the sequence was valid or not, to resync with remote */
+	if (require_ack)
+		cpc_protocol_queue_ack(cport, ack);
+
+	if (expected_seq && !cpc_header_is_control(cpc_hdr)) {
 		skb_pull(skb, CPC_HEADER_SIZE);
 
 		greybus_data_rcvd(cport->cpc_hd->gb_hd, cport->id, skb->data, skb->len);
-- 
2.49.0


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ