Message-ID: <670272e2-a4b2-4bdd-95c0-26d1e7c65816@stanley.mountain>
Date: Wed, 22 Jan 2025 16:16:48 +0300
From: Dan Carpenter <dan.carpenter@...aro.org>
To: Simon Horman <horms@...nel.org>,
Herbert Xu <herbert@...dor.apana.org.au>
Cc: Steffen Klassert <steffen.klassert@...unet.com>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>, Paolo Abeni <pabeni@...hat.com>,
netdev@...r.kernel.org, linux-kernel@...r.kernel.org,
kernel-janitors@...r.kernel.org
Subject: Re: [PATCH net] xfrm: fix integer overflow in
xfrm_replay_state_esn_len()
On Wed, Jan 22, 2025 at 12:39:36PM +0000, Simon Horman wrote:
> > The one caller that I didn't modify was xfrm_sa_len(). That's a bit
> > complicated and also I'm kind of hoping that we don't handle user
> > controlled data in that function? The place where we definitely are
> > handling user data is in xfrm_alloc_replay_state_esn() and this patch
> > fixes that.
>
> Yes, that is a bit "complex".
>
I don't have a reason to suspect xfrm_sa_len(), but if we wanted a
paranoid version of it, I've written a draft below. I stole Herbert's
xfrm_kblen2klen() function[1]. The nlmsg_new() function would need to
be updated as well.

[1] https://lore.kernel.org/all/Z2KZC71JZ0QnrhfU@gondor.apana.org.au/
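For what it's worth, the reason the two changes go together is that
size_add() (from include/linux/overflow.h) saturates at SIZE_MAX on
overflow, so a saturated length from xfrm_sa_len() then trips the new
INT_MAX check in nlmsg_new() instead of quietly wrapping when
nlmsg_total_size() is computed. A rough sketch of the saturating
behaviour (assuming the usual check_add_overflow() semantics;
size_add_sketch() is just my illustration, not the kernel helper):

	/* Mirrors my understanding of size_add(): saturate instead of wrapping. */
	static inline size_t size_add_sketch(size_t a, size_t b)
	{
		size_t sum;

		if (check_add_overflow(a, b, &sum))
			return SIZE_MAX;
		return sum;
	}

With that, a saturated SIZE_MAX result is larger than INT_MAX, so
nlmsg_new() returns NULL and the existing "if (skb == NULL)" error path
handles it.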
regards,
dan carpenter
diff --git a/include/net/netlink.h b/include/net/netlink.h
index e015ffbed819..ca7a8152e6d4 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -1015,6 +1015,8 @@ static inline struct nlmsghdr *nlmsg_put_answer(struct sk_buff *skb,
*/
static inline struct sk_buff *nlmsg_new(size_t payload, gfp_t flags)
{
+ if (payload > INT_MAX)
+ return NULL;
return alloc_skb(nlmsg_total_size(payload), flags);
}
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 08c6d6f0179f..ea51a730f268 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -3575,61 +3575,69 @@ static int xfrm_notify_sa_flush(const struct km_event *c)
return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_SA);
}
+static inline unsigned int xfrm_kblen2klen(unsigned int klen_in_bits)
+{
+ return klen_in_bits / 8 + !!(klen_in_bits & 7);
+}
-static inline unsigned int xfrm_sa_len(struct xfrm_state *x)
+static inline size_t xfrm_sa_len(struct xfrm_state *x)
{
- unsigned int l = 0;
+ size_t l = 0;
+
if (x->aead)
- l += nla_total_size(aead_len(x->aead));
+ l = size_add(l, nla_total_size(aead_len(x->aead)));
if (x->aalg) {
- l += nla_total_size(sizeof(struct xfrm_algo) +
- (x->aalg->alg_key_len + 7) / 8);
- l += nla_total_size(xfrm_alg_auth_len(x->aalg));
+ unsigned int old_size;
+
+ old_size = nla_total_size(sizeof(struct xfrm_algo) +
+ xfrm_kblen2klen(x->aalg->alg_key_len));
+ l = size_add(l, old_size);
+ l = size_add(l, nla_total_size(xfrm_alg_auth_len(x->aalg)));
}
if (x->ealg)
- l += nla_total_size(xfrm_alg_len(x->ealg));
+ l = size_add(l, nla_total_size(xfrm_alg_len(x->ealg)));
if (x->calg)
- l += nla_total_size(sizeof(*x->calg));
+ l = size_add(l, nla_total_size(sizeof(*x->calg)));
if (x->encap)
- l += nla_total_size(sizeof(*x->encap));
+ l = size_add(l, nla_total_size(sizeof(*x->encap)));
if (x->tfcpad)
- l += nla_total_size(sizeof(x->tfcpad));
+ l = size_add(l, nla_total_size(sizeof(x->tfcpad)));
if (x->replay_esn)
- l += nla_total_size(xfrm_replay_state_esn_len(x->replay_esn));
+ l = size_add(l, nla_total_size(xfrm_replay_state_esn_len(x->replay_esn)));
else
- l += nla_total_size(sizeof(struct xfrm_replay_state));
+ l = size_add(l, nla_total_size(sizeof(struct xfrm_replay_state)));
if (x->security)
- l += nla_total_size(sizeof(struct xfrm_user_sec_ctx) +
- x->security->ctx_len);
+ l = size_add(l, nla_total_size(sizeof(struct xfrm_user_sec_ctx) +
+ x->security->ctx_len));
if (x->coaddr)
- l += nla_total_size(sizeof(*x->coaddr));
+ l = size_add(l, nla_total_size(sizeof(*x->coaddr)));
if (x->props.extra_flags)
- l += nla_total_size(sizeof(x->props.extra_flags));
+ l = size_add(l, nla_total_size(sizeof(x->props.extra_flags)));
if (x->xso.dev)
- l += nla_total_size(sizeof(struct xfrm_user_offload));
+ l = size_add(l, nla_total_size(sizeof(struct xfrm_user_offload)));
if (x->props.smark.v | x->props.smark.m) {
- l += nla_total_size(sizeof(x->props.smark.v));
- l += nla_total_size(sizeof(x->props.smark.m));
+ l = size_add(l, nla_total_size(sizeof(x->props.smark.v)));
+ l = size_add(l, nla_total_size(sizeof(x->props.smark.m)));
}
if (x->if_id)
- l += nla_total_size(sizeof(x->if_id));
+ l = size_add(l, nla_total_size(sizeof(x->if_id)));
if (x->pcpu_num)
- l += nla_total_size(sizeof(x->pcpu_num));
+ l = size_add(l, nla_total_size(sizeof(x->pcpu_num)));
/* Must count x->lastused as it may become non-zero behind our back. */
- l += nla_total_size_64bit(sizeof(u64));
+ l = size_add(l, nla_total_size_64bit(sizeof(u64)));
if (x->mapping_maxage)
- l += nla_total_size(sizeof(x->mapping_maxage));
+ l = size_add(l, nla_total_size(sizeof(x->mapping_maxage)));
if (x->dir)
- l += nla_total_size(sizeof(x->dir));
+ l = size_add(l, nla_total_size(sizeof(x->dir)));
if (x->nat_keepalive_interval)
- l += nla_total_size(sizeof(x->nat_keepalive_interval));
+ l = size_add(l, nla_total_size(sizeof(x->nat_keepalive_interval)));
if (x->mode_cbs && x->mode_cbs->sa_len)
- l += x->mode_cbs->sa_len(x);
+ l = size_add(l, x->mode_cbs->sa_len(x));
return l;
}
@@ -3641,17 +3649,17 @@ static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c)
struct xfrm_usersa_id *id;
struct nlmsghdr *nlh;
struct sk_buff *skb;
- unsigned int len = xfrm_sa_len(x);
+ size_t len = xfrm_sa_len(x);
unsigned int headlen;
int err;
headlen = sizeof(*p);
if (c->event == XFRM_MSG_DELSA) {
- len += nla_total_size(headlen);
+ len = size_add(len, nla_total_size(headlen));
headlen = sizeof(*id);
- len += nla_total_size(sizeof(struct xfrm_mark));
+ len = size_add(len, nla_total_size(sizeof(struct xfrm_mark)));
}
- len += NLMSG_ALIGN(headlen);
+ len = size_add(len, NLMSG_ALIGN(headlen));
skb = nlmsg_new(len, GFP_ATOMIC);
if (skb == NULL)