Message-Id: <20210111130033.066979023@linuxfoundation.org>
Date: Mon, 11 Jan 2021 14:00:44 +0100
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, Randy Dunlap <rdunlap@...radead.org>,
syzbot+97c5bd9cc81eca63d36e@...kaller.appspotmail.com,
Nogah Frankel <nogahf@...lanox.com>,
Jamal Hadi Salim <jhs@...atatu.com>,
Cong Wang <xiyou.wangcong@...il.com>,
Jiri Pirko <jiri@...nulli.us>, netdev@...r.kernel.org,
"David S. Miller" <davem@...emloft.net>,
Jakub Kicinski <kuba@...nel.org>
Subject: [PATCH 4.4 12/38] net: sched: prevent invalid Scell_log shift count

From: Randy Dunlap <rdunlap@...radead.org>

[ Upstream commit bd1248f1ddbc48b0c30565fce897a3b6423313b8 ]

Check Scell_log shift size in red_check_params() and modify all callers
of red_check_params() to pass Scell_log.

This prevents a shift out-of-bounds as detected by UBSAN:

  UBSAN: shift-out-of-bounds in ./include/net/red.h:252:22
  shift exponent 72 is too large for 32-bit type 'int'
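
For context, a minimal user-space sketch (not part of the patch; the
helper name and values here are purely illustrative) of why a shift
count taken from userspace has to be range-checked before use:

/* Standalone illustration, not kernel code: shifting a 32-bit integer
 * by 32 or more bits is undefined behaviour in C, so a user-supplied
 * Scell_log must be rejected before it reaches any shift expression.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the guard this patch adds to red_check_params(). */
static bool scell_log_ok(uint8_t Scell_log)
{
	return Scell_log < 32;
}

int main(void)
{
	uint8_t scell_log = 72;	/* the shift exponent reported by UBSAN */

	if (!scell_log_ok(scell_log)) {
		printf("Scell_log %u rejected\n", (unsigned)scell_log);
		return 0;
	}
	/* Only reached for Scell_log < 32, so this shift is well defined. */
	printf("Scell_max = %u\n", 255u << scell_log);
	return 0;
}

With the guard in place, the qdiscs touched below return -EINVAL for
such parameters before the shift in red.h flagged above can be reached.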
Fixes: 8afa10cbe281 ("net_sched: red: Avoid illegal values")
Signed-off-by: Randy Dunlap <rdunlap@...radead.org>
Reported-by: syzbot+97c5bd9cc81eca63d36e@...kaller.appspotmail.com
Cc: Nogah Frankel <nogahf@...lanox.com>
Cc: Jamal Hadi Salim <jhs@...atatu.com>
Cc: Cong Wang <xiyou.wangcong@...il.com>
Cc: Jiri Pirko <jiri@...nulli.us>
Cc: netdev@...r.kernel.org
Cc: "David S. Miller" <davem@...emloft.net>
Cc: Jakub Kicinski <kuba@...nel.org>
Signed-off-by: David S. Miller <davem@...emloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
include/net/red.h | 4 +++-
net/sched/sch_choke.c | 2 +-
net/sched/sch_gred.c | 2 +-
net/sched/sch_red.c | 2 +-
net/sched/sch_sfq.c | 2 +-
5 files changed, 7 insertions(+), 5 deletions(-)
--- a/include/net/red.h
+++ b/include/net/red.h
@@ -167,12 +167,14 @@ static inline void red_set_vars(struct r
v->qcount = -1;
}
-static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog)
+static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog, u8 Scell_log)
{
if (fls(qth_min) + Wlog > 32)
return false;
if (fls(qth_max) + Wlog > 32)
return false;
+ if (Scell_log >= 32)
+ return false;
if (qth_max < qth_min)
return false;
return true;
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -439,7 +439,7 @@ static int choke_change(struct Qdisc *sc
ctl = nla_data(tb[TCA_CHOKE_PARMS]);
- if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
+ if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log))
return -EINVAL;
if (ctl->limit > CHOKE_MAX_QUEUE)
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -389,7 +389,7 @@ static inline int gred_change_vq(struct
struct gred_sched *table = qdisc_priv(sch);
struct gred_sched_data *q = table->tab[dp];
- if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
+ if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log))
return -EINVAL;
if (!q) {
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -203,7 +203,7 @@ static int red_change(struct Qdisc *sch,
max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;
ctl = nla_data(tb[TCA_RED_PARMS]);
- if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
+ if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log))
return -EINVAL;
if (ctl->limit > 0) {
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -645,7 +645,7 @@ static int sfq_change(struct Qdisc *sch,
}
if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max,
- ctl_v1->Wlog))
+ ctl_v1->Wlog, ctl_v1->Scell_log))
return -EINVAL;
if (ctl_v1 && ctl_v1->qth_min) {
p = kmalloc(sizeof(*p), GFP_KERNEL);