Message-ID: <20220812093412.808351-1-sunsuwan3@huawei.com>
Date: Fri, 12 Aug 2022 17:34:12 +0800
From: sunsuwan <sunsuwan3@...wei.com>
To: <horms@...ge.net.au>, <ja@....bg>, <pablo@...filter.org>,
<kadlec@...filter.org>, <netdev@...r.kernel.org>,
<lvs-devel@...r.kernel.org>
CC: <chenzhen126@...wei.com>, <yanan@...wei.com>,
<liaichun@...wei.com>, <caowangbao@...wei.com>,
<sunsuwan3@...wei.com>
Subject: [PATCH] net: ipvs: add rcu read lock in packet paths

We found a possible use-after-free (UAF): if a pe or scheduler module
is removed with rmmod while packets are still traversing the netfilter
hooks, the packet path can dereference svc->pe or svc->scheduler after
the module's memory has been freed.

Signed-off-by: sunsuwan <sunsuwan3@...wei.com>
Signed-off-by: chenzhen <chenzhen126@...wei.com>
---
 net/netfilter/ipvs/ip_vs_core.c | 6 ++++++
 net/netfilter/ipvs/ip_vs_ctl.c  | 3 +++
 net/netfilter/ipvs/ip_vs_dh.c   | 2 ++
 3 files changed, 11 insertions(+)

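Note (not part of the commit message): as a reference for the UAF
described above, here is a minimal, self-contained sketch of the
reader-side RCU pattern the hunks below rely on. It is not IPVS code;
struct demo_pe, demo_pe_ptr and demo_reader() are made-up names used
only for illustration. The point is that rcu_dereference() runs inside
an rcu_read_lock()/rcu_read_unlock() section, which is what prevents a
concurrent updater from freeing the object while the reader uses it.

#include <linux/rcupdate.h>

struct demo_pe {
	int (*fill_param)(void);
};

/* Updated from the control path with rcu_assign_pointer(). */
static struct demo_pe __rcu *demo_pe_ptr;

/* Packet-path reader: dereference and use the pointer under the lock. */
static int demo_reader(void)
{
	struct demo_pe *pe;
	int ret = 0;

	rcu_read_lock();
	pe = rcu_dereference(demo_pe_ptr);
	if (pe && pe->fill_param)
		ret = pe->fill_param();
	rcu_read_unlock();

	return ret;
}

On the update side the old object may only be freed after
synchronize_rcu() (or via kfree_rcu()), so any reader that entered the
critical section before the update finishes its access first.
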
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 51ad557a525b..d289f184d5c1 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -235,7 +235,9 @@ ip_vs_conn_fill_param_persist(const struct ip_vs_service *svc,
 {
 	ip_vs_conn_fill_param(svc->ipvs, svc->af, protocol, caddr, cport, vaddr,
 			      vport, p);
+	rcu_read_lock();
 	p->pe = rcu_dereference(svc->pe);
+	rcu_read_unlock();
 	if (p->pe && p->pe->fill_param)
 		return p->pe->fill_param(p, skb);
 
@@ -346,7 +348,9 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
 	 * template is not available.
 	 * return *ignored=0 i.e. ICMP and NF_DROP
 	 */
+	rcu_read_lock();
 	sched = rcu_dereference(svc->scheduler);
+	rcu_read_unlock();
 	if (sched) {
 		/* read svc->sched_data after svc->scheduler */
 		smp_rmb();
@@ -521,7 +525,9 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
 		return NULL;
 	}
 
+	rcu_read_lock();
 	sched = rcu_dereference(svc->scheduler);
+	rcu_read_unlock();
 	if (sched) {
 		/* read svc->sched_data after svc->scheduler */
 		smp_rmb();
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index efab2b06d373..91e568028001 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -580,6 +580,7 @@ bool ip_vs_has_real_service(struct netns_ipvs *ipvs, int af, __u16 protocol,
 	/* Check for "full" addressed entries */
 	hash = ip_vs_rs_hashkey(af, daddr, dport);
 
+	rcu_read_lock();
 	hlist_for_each_entry_rcu(dest, &ipvs->rs_table[hash], d_list) {
 		if (dest->port == dport &&
 		    dest->af == af &&
@@ -587,9 +588,11 @@ bool ip_vs_has_real_service(struct netns_ipvs *ipvs, int af, __u16 protocol,
 		    (dest->protocol == protocol || dest->vfwmark) &&
 		    IP_VS_DFWD_METHOD(dest) == IP_VS_CONN_F_MASQ) {
 			/* HIT */
+			rcu_read_unlock();
 			return true;
 		}
 	}
+	rcu_read_unlock();
 
 	return false;
 }
diff --git a/net/netfilter/ipvs/ip_vs_dh.c b/net/netfilter/ipvs/ip_vs_dh.c
index 5e6ec32aff2b..3e4b9607172b 100644
--- a/net/netfilter/ipvs/ip_vs_dh.c
+++ b/net/netfilter/ipvs/ip_vs_dh.c
@@ -219,7 +219,9 @@ ip_vs_dh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
 	IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
 
 	s = (struct ip_vs_dh_state *) svc->sched_data;
+	rcu_read_lock();
 	dest = ip_vs_dh_get(svc->af, s, &iph->daddr);
+	rcu_read_unlock();
 	if (!dest
 	    || !(dest->flags & IP_VS_DEST_F_AVAILABLE)
 	    || atomic_read(&dest->weight) <= 0
--
2.30.0