Message-ID: <20240702222034.1378764-3-o-takashi@sakamocchi.jp>
Date: Wed, 3 Jul 2024 07:20:31 +0900
From: Takashi Sakamoto <o-takashi@sakamocchi.jp>
To: linux1394-devel@lists.sourceforge.net
Cc: linux-kernel@vger.kernel.org
Subject: [PATCH 2/5] firewire: ohci: use static function to handle endian issue on PowerPC platform

In some respects it is preferable to use a static function instead of a
function-like macro: the types of the arguments are checked at compile
time, while the call can still be optimized into inline code rather than
an actual function call.

This commit replaces the function-like macro with static functions.
Additionally, it refactors the quirk detection into a helper function to
ease the later work.

Signed-off-by: Takashi Sakamoto <o-takashi@sakamocchi.jp>
---
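Not part of the patch, but to illustrate the rationale above, here is a
minimal userspace sketch of the same pattern. The names below (swab32(),
COND_LE32_TO_CPU(), cond_le32_to_cpu(), has_be_quirk) are local stand-ins
rather than the kernel symbols, and le32_to_cpu() is treated as the
identity, i.e. a little-endian host is assumed:

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel byte-swap helper. */
static inline uint32_t swab32(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0x0000ff00) |
	       ((v << 8) & 0x00ff0000) | (v << 24);
}

/* A function-like macro performs no type checking of its arguments. */
#define COND_LE32_TO_CPU(v, be)	((be) ? swab32(v) : (v))

/* A static function checks the type of its argument, and the compiler
 * can still inline it, so no function call remains in optimized code. */
static inline uint32_t cond_le32_to_cpu(uint32_t value, bool has_be_quirk)
{
	return has_be_quirk ? swab32(value) : value;
}

int main(void)
{
	uint32_t raw = 0x12345678;

	/* Without the quirk the little-endian value is taken as-is;
	 * with the quirk it is byte-swapped. */
	printf("%08" PRIx32 "\n", cond_le32_to_cpu(raw, false)); /* 12345678 */
	printf("%08" PRIx32 "\n", cond_le32_to_cpu(raw, true));  /* 78563412 */

	/* The macro expands to the same result, but never checks that
	 * its argument is a 32-bit value. */
	printf("%08" PRIx32 "\n", COND_LE32_TO_CPU(raw, true));  /* 78563412 */
	return 0;
}

At -O2 both forms compile to the same inlined byte swap; the difference
is that the function gives its argument a definite type, which is what
allows the kernel version to take an explicit __le32 argument.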
drivers/firewire/ohci.c | 39 +++++++++++++++++++++++++++------------
1 file changed, 27 insertions(+), 12 deletions(-)

diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index a2d6d1d1ec2b..5b90ca72fa64 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -875,10 +875,25 @@ static void ar_sync_buffers_for_cpu(struct ar_context *ctx,
}

#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
-#define cond_le32_to_cpu(v) \
- (ohci->quirks & QUIRK_BE_HEADERS ? be32_to_cpu(v) : le32_to_cpu(v))
+static u32 cond_le32_to_cpu(__le32 value, bool has_be_header_quirk)
+{
+ return has_be_header_quirk ? be32_to_cpu(value) : le32_to_cpu(value);
+}
+
+static bool has_be_header_quirk(const struct fw_ohci *ohci)
+{
+ return !!(ohci->quirks & QUIRK_BE_HEADERS);
+}
#else
-#define cond_le32_to_cpu(v) le32_to_cpu(v)
+static u32 cond_le32_to_cpu(__le32 value, bool has_be_header_quirk __maybe_unused)
+{
+ return le32_to_cpu(value);
+}
+
+static bool has_be_header_quirk(const struct fw_ohci *ohci)
+{
+ return false;
+}
#endif

static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
@@ -888,9 +903,9 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
u32 status, length, tcode;
int evt;

- p.header[0] = cond_le32_to_cpu(buffer[0]);
- p.header[1] = cond_le32_to_cpu(buffer[1]);
- p.header[2] = cond_le32_to_cpu(buffer[2]);
+ p.header[0] = cond_le32_to_cpu(buffer[0], has_be_header_quirk(ohci));
+ p.header[1] = cond_le32_to_cpu(buffer[1], has_be_header_quirk(ohci));
+ p.header[2] = cond_le32_to_cpu(buffer[2], has_be_header_quirk(ohci));

tcode = async_header_get_tcode(p.header);
switch (tcode) {
@@ -902,7 +917,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
break;

case TCODE_READ_BLOCK_REQUEST :
- p.header[3] = cond_le32_to_cpu(buffer[3]);
+ p.header[3] = cond_le32_to_cpu(buffer[3], has_be_header_quirk(ohci));
p.header_length = 16;
p.payload_length = 0;
break;
@@ -911,7 +926,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
case TCODE_READ_BLOCK_RESPONSE:
case TCODE_LOCK_REQUEST:
case TCODE_LOCK_RESPONSE:
- p.header[3] = cond_le32_to_cpu(buffer[3]);
+ p.header[3] = cond_le32_to_cpu(buffer[3], has_be_header_quirk(ohci));
p.header_length = 16;
p.payload_length = async_header_get_data_length(p.header);
if (p.payload_length > MAX_ASYNC_PAYLOAD) {
@@ -936,7 +951,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)

/* FIXME: What to do about evt_* errors? */
length = (p.header_length + p.payload_length + 3) / 4;
- status = cond_le32_to_cpu(buffer[length]);
+ status = cond_le32_to_cpu(buffer[length], has_be_header_quirk(ohci));
evt = (status >> 16) & 0x1f;

p.ack = evt - 16;
@@ -2030,12 +2045,12 @@ static void bus_reset_work(struct work_struct *work)
return;
}

- generation = (cond_le32_to_cpu(ohci->self_id[0]) >> 16) & 0xff;
+ generation = (cond_le32_to_cpu(ohci->self_id[0], has_be_header_quirk(ohci)) >> 16) & 0xff;
rmb();

for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
- u32 id = cond_le32_to_cpu(ohci->self_id[i]);
- u32 id2 = cond_le32_to_cpu(ohci->self_id[i + 1]);
+ u32 id = cond_le32_to_cpu(ohci->self_id[i], has_be_header_quirk(ohci));
+ u32 id2 = cond_le32_to_cpu(ohci->self_id[i + 1], has_be_header_quirk(ohci));

if (id != ~id2) {
/*
--
2.43.0