lists.openwall.net | lists / announce owl-users owl-dev john-users john-dev passwdqc-users yescrypt popa3d-users / oss-security kernel-hardening musl sabotage tlsify passwords / crypt-dev xvendor / Bugtraq Full-Disclosure linux-kernel linux-netdev linux-ext4 linux-hardening linux-cve-announce PHC | |
Open Source and information security mailing list archives
| ||
|
Date: Thu, 23 Jul 2020 11:21:32 -0700 From: Mark Salyzyn <salyzyn@...roid.com> To: linux-kernel@...r.kernel.org Cc: kernel-team@...roid.com, Mark Salyzyn <salyzyn@...roid.com>, netdev@...r.kernel.org, "David S. Miller" <davem@...emloft.net>, Jakub Kicinski <kuba@...nel.org>, Thomas Graf <tgraf@...g.ch> Subject: [PATCH] netlink: add buffer boundary checking Many of the nla_get_* inlines fail to check attribute's length before copying the content resulting in possible out-of-bounds accesses. Adjust the inlines to perform nla_len checking, for the most part using the nla_memcpy function to facilitate since these are not necessarily performance critical and do not need a likely fast path. Signed-off-by: Mark Salyzyn <salyzyn@...roid.com> Cc: netdev@...r.kernel.org Cc: linux-kernel@...r.kernel.org Cc: kernel-team@...roid.com Cc: "David S. Miller" <davem@...emloft.net> Cc: Jakub Kicinski <kuba@...nel.org> Cc: Thomas Graf <tgraf@...g.ch> Fixes: bfa83a9e03cf ("[NETLINK]: Type-safe netlink messages/attributes interface") --- include/net/netlink.h | 66 +++++++++++++++++++++++++++++++++++-------- 1 file changed, 54 insertions(+), 12 deletions(-) diff --git a/include/net/netlink.h b/include/net/netlink.h index c0411f14fb53..11c0f153be7c 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -1538,7 +1538,11 @@ static inline int nla_put_bitfield32(struct sk_buff *skb, int attrtype, */ static inline u32 nla_get_u32(const struct nlattr *nla) { - return *(u32 *) nla_data(nla); + u32 tmp; + + nla_memcpy(&tmp, nla, sizeof(tmp)); + + return tmp; } /** @@ -1547,7 +1551,11 @@ static inline u32 nla_get_u32(const struct nlattr *nla) */ static inline __be32 nla_get_be32(const struct nlattr *nla) { - return *(__be32 *) nla_data(nla); + __be32 tmp; + + nla_memcpy(&tmp, nla, sizeof(tmp)); + + return tmp; } /** @@ -1556,7 +1564,11 @@ static inline __be32 nla_get_be32(const struct nlattr *nla) */ static inline __le32 nla_get_le32(const struct nlattr *nla) { - return *(__le32 *) 
nla_data(nla); + __le32 tmp; + + nla_memcpy(&tmp, nla, sizeof(tmp)); + + return tmp; } /** @@ -1565,7 +1577,11 @@ static inline __le32 nla_get_le32(const struct nlattr *nla) */ static inline u16 nla_get_u16(const struct nlattr *nla) { - return *(u16 *) nla_data(nla); + u16 tmp; + + nla_memcpy(&tmp, nla, sizeof(tmp)); + + return tmp; } /** @@ -1574,7 +1590,11 @@ static inline u16 nla_get_u16(const struct nlattr *nla) */ static inline __be16 nla_get_be16(const struct nlattr *nla) { - return *(__be16 *) nla_data(nla); + __be16 tmp; + + nla_memcpy(&tmp, nla, sizeof(tmp)); + + return tmp; } /** @@ -1583,7 +1603,11 @@ static inline __be16 nla_get_be16(const struct nlattr *nla) */ static inline __le16 nla_get_le16(const struct nlattr *nla) { - return *(__le16 *) nla_data(nla); + __le16 tmp; + + nla_memcpy(&tmp, nla, sizeof(tmp)); + + return tmp; } /** @@ -1592,7 +1616,7 @@ static inline __le16 nla_get_le16(const struct nlattr *nla) */ static inline u8 nla_get_u8(const struct nlattr *nla) { - return *(u8 *) nla_data(nla); + return (nla_len(nla) >= sizeof(u8)) ? 
*(u8 *) nla_data(nla) : 0; } /** @@ -1627,7 +1651,11 @@ static inline __be64 nla_get_be64(const struct nlattr *nla) */ static inline __le64 nla_get_le64(const struct nlattr *nla) { - return *(__le64 *) nla_data(nla); + __le64 tmp; + + nla_memcpy(&tmp, nla, sizeof(tmp)); + + return tmp; } /** @@ -1636,7 +1664,11 @@ static inline __le64 nla_get_le64(const struct nlattr *nla) */ static inline s32 nla_get_s32(const struct nlattr *nla) { - return *(s32 *) nla_data(nla); + s32 tmp; + + nla_memcpy(&tmp, nla, sizeof(tmp)); + + return tmp; } /** @@ -1645,7 +1677,11 @@ static inline s32 nla_get_s32(const struct nlattr *nla) */ static inline s16 nla_get_s16(const struct nlattr *nla) { - return *(s16 *) nla_data(nla); + s16 tmp; + + nla_memcpy(&tmp, nla, sizeof(tmp)); + + return tmp; } /** @@ -1654,7 +1690,7 @@ static inline s16 nla_get_s16(const struct nlattr *nla) */ static inline s8 nla_get_s8(const struct nlattr *nla) { - return *(s8 *) nla_data(nla); + return (nla_len(nla) >= sizeof(s8)) ? *(s8 *) nla_data(nla) : 0; } /** @@ -1698,7 +1734,11 @@ static inline unsigned long nla_get_msecs(const struct nlattr *nla) */ static inline __be32 nla_get_in_addr(const struct nlattr *nla) { - return *(__be32 *) nla_data(nla); + __be32 tmp; + + nla_memcpy(&tmp, nla, sizeof(tmp)); + + return tmp; } /** @@ -1710,6 +1750,7 @@ static inline struct in6_addr nla_get_in6_addr(const struct nlattr *nla) struct in6_addr tmp; nla_memcpy(&tmp, nla, sizeof(tmp)); + return tmp; } @@ -1722,6 +1763,7 @@ static inline struct nla_bitfield32 nla_get_bitfield32(const struct nlattr *nla) struct nla_bitfield32 tmp; nla_memcpy(&tmp, nla, sizeof(tmp)); + return tmp; } -- 2.28.0.rc0.105.gf9edc3c819-goog
Powered by blists - more mailing lists