Date:   Tue, 12 Dec 2017 23:48:56 +0000
From:   Al Viro <viro@...IV.linux.org.uk>
To:     Jakub Kicinski <kubakici@...pl>
Cc:     Linus Torvalds <torvalds@...ux-foundation.org>,
        netdev@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: Re: [RFC][PATCH] new byteorder primitives - ..._{replace,get}_bits()

On Tue, Dec 12, 2017 at 12:04:09PM -0800, Jakub Kicinski wrote:

> > static __always_inline u64 mask_to_multiplier(u64 mask)
> > {
> > 	return mask & (mask ^ (mask - 1));
> > }

D'oh.  Even simpler than that, of course -

static __always_inline u64 mask_to_multiplier(u64 mask)
{
	return mask & -mask;
}
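
Both forms do the same thing, by the way: they isolate the lowest set bit
of the mask, which is exactly the multiplier that shifts a field value
into position.  Quick standalone demonstration (hypothetical test program,
not part of the patch):

#include <stdio.h>
#include <stdint.h>

/* both expressions isolate the lowest set bit of mask */
static uint64_t old_form(uint64_t mask) { return mask & (mask ^ (mask - 1)); }
static uint64_t new_form(uint64_t mask) { return mask & -mask; }

int main(void)
{
	uint64_t masks[] = { 0x00f0, 0x0ff00000, 0x8000000000000000ull };

	for (size_t i = 0; i < sizeof(masks) / sizeof(masks[0]); i++)
		printf("%#llx -> %#llx %#llx\n",
		       (unsigned long long)masks[i],
		       (unsigned long long)old_form(masks[i]),
		       (unsigned long long)new_form(masks[i]));
	return 0;	/* 0xf0 -> 0x10, 0xff00000 -> 0x100000, ... */
}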

> Very nice!  The compile-time check that the value fits in the field
> covered by the mask (when both are known) has helped me catch bugs
> early a few times over the years, so if it could be preserved we could
> maybe even drop the FIELD_* macros and just use this approach?

Umm...  Something like this, perhaps?  Same bunch, plus u{16,32,64}_...
variants for host-endian.  Adding a sanity check on the mask is also not
hard, but I don't know how useful it would actually be...

diff --git a/include/linux/byteorder/generic.h b/include/linux/byteorder/generic.h
index 451aaa0786ae..a032de9aa03d 100644
--- a/include/linux/byteorder/generic.h
+++ b/include/linux/byteorder/generic.h
@@ -187,4 +187,36 @@ static inline void be32_to_cpu_array(u32 *dst, const __be32 *src, size_t len)
 		dst[i] = be32_to_cpu(src[i]);
 }
 
+extern void __compiletime_error("value doesn't fit into mask")
+__field_overflow(void);
+static __always_inline u64 mask_to_multiplier(u64 mask)
+{
+	return mask & -mask;
+}
+
+#define ____MAKE_OP(type,base,to,from)					\
+static __always_inline __##type type##_replace_bits(__##type old,	\
+					base val, base mask)		\
+{									\
+	__##type m = to(mask);						\
+	if (__builtin_constant_p(val) &&				\
+	    (val & ~(mask/mask_to_multiplier(mask))))			\
+		__field_overflow();					\
+	return (old & ~m) |						\
+		(to(val * mask_to_multiplier(mask)) & m);		\
+}									\
+static __always_inline base type##_get_bits(__##type v, base mask)	\
+{									\
+	return (from(v) & mask)/mask_to_multiplier(mask);		\
+}
+#define __MAKE_OP(size)							\
+	____MAKE_OP(le##size,u##size,cpu_to_le##size,le##size##_to_cpu)	\
+	____MAKE_OP(be##size,u##size,cpu_to_be##size,be##size##_to_cpu)	\
+	____MAKE_OP(u##size,u##size,,)
+__MAKE_OP(16)
+__MAKE_OP(32)
+__MAKE_OP(64)
+#undef __MAKE_OP
+#undef ____MAKE_OP
+
 #endif /* _LINUX_BYTEORDER_GENERIC_H */

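FWIW, a caller ends up looking something like this - hypothetical driver,
register and field names made up, just to show the intended use with the
patch above applied:

#include <linux/types.h>
#include <asm/byteorder.h>	/* pulls in linux/byteorder/generic.h */

#define FOO_CTRL_MODE	0x000000f0	/* made-up 4-bit field, bits 7:4 */

/* pack/unpack the mode field of a little-endian 32-bit register word */
static __le32 foo_set_mode(__le32 ctrl, u32 mode)
{
	return le32_replace_bits(ctrl, mode, FOO_CTRL_MODE);
}

static u32 foo_get_mode(__le32 ctrl)
{
	return le32_get_bits(ctrl, FOO_CTRL_MODE);
}

And when the value handed to ..._replace_bits() is a compile-time constant
that doesn't fit the field (say, 16 into those four bits), the
__field_overflow() call survives to compile time and the build fails with
"value doesn't fit into mask" - which is the check you wanted preserved.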