Message-ID: <20240327152358.2368467-6-aleksander.lobakin@intel.com>
Date: Wed, 27 Mar 2024 16:23:42 +0100
From: Alexander Lobakin <aleksander.lobakin@...el.com>
To: "David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>
Cc: Alexander Lobakin <aleksander.lobakin@...el.com>,
Yury Norov <yury.norov@...il.com>,
Alexander Potapenko <glider@...gle.com>,
nex.sw.ncis.osdt.itp.upstreaming@...el.com,
intel-wired-lan@...ts.osuosl.org,
netdev@...r.kernel.org,
linux-kernel@...r.kernel.org,
Andy Shevchenko <andriy.shevchenko@...ux.intel.com>,
Przemek Kitszel <przemyslaw.kitszel@...el.com>
Subject: [PATCH net-next v6 05/21] bitops: make BYTES_TO_BITS() treewide-available

Avoid open-coding that simple expression each time by moving
BYTES_TO_BITS() from the probes code to <linux/bitops.h>, exporting
it to the rest of the kernel.

Simplify the macro while at it. `BITS_PER_LONG / sizeof(long)` always
equals %BITS_PER_BYTE, regardless of the target architecture.
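
For illustration only (not part of the change), a minimal userspace
sketch of the equivalence. BITS_PER_LONG is derived from sizeof(long)
here for brevity (in the kernel it is a fixed per-arch constant), and
the OLD_/NEW_BYTES_TO_BITS() names are hypothetical:

#define BITS_PER_BYTE		8
#define BITS_PER_LONG		(sizeof(long) * BITS_PER_BYTE)

/* old form, as previously open-coded in the probes code */
#define OLD_BYTES_TO_BITS(nb)	((BITS_PER_LONG * (nb)) / sizeof(long))
/* new form, as added to <linux/bitops.h> */
#define NEW_BYTES_TO_BITS(nb)	((nb) * BITS_PER_BYTE)

/* (sizeof(long) * 8 * 3) / sizeof(long) == 24 == 3 * 8 on any arch */
_Static_assert(OLD_BYTES_TO_BITS(3) == NEW_BYTES_TO_BITS(3),
	       "both forms must yield 24 bits for 3 bytes");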

Do the same for the tools ecosystem as well (incl. its version of
bitops.h). The previous implementation's result had an implicit type
of long, while the new one is int, so adjust the format specifier
accordingly in the perf code.
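
Again for illustration only (not part of the change), a userspace
sketch of why the specifier changes, using the same hypothetical
OLD_/NEW_BYTES_TO_BITS() names: the division by sizeof(long) promotes
the old result to size_t (hence the %zd previously used in perf),
while the new expression stays a plain int and takes %d:

#include <stdio.h>

#define BITS_PER_BYTE		8
#define BITS_PER_LONG		(sizeof(long) * BITS_PER_BYTE)

#define OLD_BYTES_TO_BITS(nb)	((nb) * BITS_PER_LONG / sizeof(long))
#define NEW_BYTES_TO_BITS(nb)	((nb) * BITS_PER_BYTE)

int main(void)
{
	int total = 4;	/* byte size of some variable */

	/* size_t result wants %zu/%zd, int result wants %d */
	printf("old: %zu bits, new: %d bits\n",
	       OLD_BYTES_TO_BITS(total), NEW_BYTES_TO_BITS(total));

	return 0;
}
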
Suggested-by: Andy Shevchenko <andriy.shevchenko@...ux.intel.com>
Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@...el.com>
Acked-by: Yury Norov <yury.norov@...il.com>
Signed-off-by: Alexander Lobakin <aleksander.lobakin@...el.com>
---
include/linux/bitops.h | 2 ++
tools/include/linux/bitops.h | 2 ++
kernel/trace/trace_probe.c | 2 --
tools/perf/util/probe-finder.c | 4 +---
4 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index f7f5a783da2a..e0cd09eb91cd 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -21,6 +21,8 @@
#define BITS_TO_U32(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u32))
#define BITS_TO_BYTES(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(char))

+#define BYTES_TO_BITS(nb) ((nb) * BITS_PER_BYTE)
+
extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
diff --git a/tools/include/linux/bitops.h b/tools/include/linux/bitops.h
index 7319f6ced108..272f15d0e434 100644
--- a/tools/include/linux/bitops.h
+++ b/tools/include/linux/bitops.h
@@ -20,6 +20,8 @@
#define BITS_TO_U32(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(u32))
#define BITS_TO_BYTES(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(char))

+#define BYTES_TO_BITS(nb) ((nb) * BITS_PER_BYTE)
+
extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index 217169de0920..cd5cab0be1f3 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -1180,8 +1180,6 @@ parse_probe_arg(char *arg, const struct fetch_type *type,
return ret;
}

-#define BYTES_TO_BITS(nb) ((BITS_PER_LONG * (nb)) / sizeof(long))
-
/* Bitfield type needs to be parsed into a fetch function */
static int __parse_bitfield_probe_arg(const char *bf,
const struct fetch_type *t,
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index c8923375e30d..630e16c54ed5 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -186,8 +186,6 @@ static int convert_variable_location(Dwarf_Die *vr_die, Dwarf_Addr addr,
return ret2;
}

-#define BYTES_TO_BITS(nb) ((nb) * BITS_PER_LONG / sizeof(long))
-
static int convert_variable_type(Dwarf_Die *vr_die,
struct probe_trace_arg *tvar,
const char *cast, bool user_access)
@@ -217,7 +215,7 @@ static int convert_variable_type(Dwarf_Die *vr_die,
total = dwarf_bytesize(vr_die);
if (boffs < 0 || total < 0)
return -ENOENT;
- ret = snprintf(buf, 16, "b%d@...%zd", bsize, boffs,
+ ret = snprintf(buf, 16, "b%d@...%d", bsize, boffs,
BYTES_TO_BITS(total));
goto formatted;
}
--
2.44.0