Message-ID: <CANn89iLGZj5MVG-sYpn_eyBTNT7JyunpYgv2aOsxGa9EkNV3Gw@mail.gmail.com>
Date: Thu, 28 Mar 2024 14:42:14 +0100
From: Eric Dumazet <edumazet@...gle.com>
To: Denis Kirjanov <kirjanov@...il.com>
Cc: netdev@...r.kernel.org, jgg@...pe.ca, leon@...nel.org,
Denis Kirjanov <dkirjanov@...e.de>, syzbot+5fe14f2ff4ccbace9a26@...kaller.appspotmail.com
Subject: Re: [PATCH v3 net] Subject: [PATCH] RDMA/core: fix UAF with ib_device_get_netdev()
On Thu, Mar 28, 2024 at 2:36 PM Denis Kirjanov <kirjanov@...il.com> wrote:
>
> A call to ib_device_get_netdev() may lead to a race condition while
> accessing a netdevice instance, since we don't hold the rtnl lock
> while checking the registration state:
> if (res && res->reg_state != NETREG_REGISTERED) {
>
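(For context, a minimal sketch of the window the changelog describes,
paraphrased from ib_device_get_netdev() rather than quoted exactly:)

	/* res was looked up and dev_hold()'d above */

	/*
	 * Without rtnl nothing orders this check against
	 * unregister_netdevice(), which moves reg_state to
	 * NETREG_UNREGISTERING while holding rtnl, so the state may
	 * change right after (or while) we read it.
	 */
	if (res && res->reg_state != NETREG_REGISTERED) {
		dev_put(res);
		return NULL;
	}
	return res;
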
> v2: unlock rtnl on error path
> v3: update remaining callers of ib_device_get_netdev
>
> Reported-by: syzbot+5fe14f2ff4ccbace9a26@...kaller.appspotmail.com
> Fixes: d41861942fc55 ("IB/core: Add generic function to extract IB speed from netdev")
> Signed-off-by: Denis Kirjanov <dkirjanov@...e.de>
> ---
> drivers/infiniband/core/cache.c | 2 ++
> drivers/infiniband/core/device.c | 15 ++++++++++++---
> drivers/infiniband/core/nldev.c | 2 ++
> drivers/infiniband/core/verbs.c | 6 ++++--
> 4 files changed, 20 insertions(+), 5 deletions(-)
>
> diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
> index c02a96d3572a..cf9c826cd520 100644
> --- a/drivers/infiniband/core/cache.c
> +++ b/drivers/infiniband/core/cache.c
> @@ -1461,7 +1461,9 @@ static int config_non_roce_gid_cache(struct ib_device *device,
> if (rdma_protocol_iwarp(device, port)) {
> struct net_device *ndev;
>
> + rtnl_lock();
> ndev = ib_device_get_netdev(device, port);
> + rtnl_unlock();
> if (!ndev)
> continue;
> RCU_INIT_POINTER(gid_attr.ndev, ndev);
> diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
> index 07cb6c5ffda0..53074a4b04c9 100644
> --- a/drivers/infiniband/core/device.c
> +++ b/drivers/infiniband/core/device.c
> @@ -2026,9 +2026,12 @@ static int iw_query_port(struct ib_device *device,
>
> memset(port_attr, 0, sizeof(*port_attr));
>
> + rtnl_lock();
> netdev = ib_device_get_netdev(device, port_num);
> - if (!netdev)
> + if (!netdev) {
> + rtnl_unlock();
> return -ENODEV;
> + }
>
> port_attr->max_mtu = IB_MTU_4096;
> port_attr->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
> @@ -2052,6 +2055,7 @@ static int iw_query_port(struct ib_device *device,
> rcu_read_unlock();
> }
>
> + rtnl_unlock();
> dev_put(netdev);
> return device->ops.query_port(device, port_num, port_attr);
> }
> @@ -2220,6 +2224,8 @@ struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
> struct ib_port_data *pdata;
> struct net_device *res;
>
> + ASSERT_RTNL();
> +
> if (!rdma_is_port_valid(ib_dev, port))
> return NULL;
>
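Note that ASSERT_RTNL() only warns when rtnl is not held, it does not
take the lock. Roughly (paraphrasing include/linux/rtnetlink.h from
memory):

	#define ASSERT_RTNL() \
		WARN_ONCE(!rtnl_is_locked(), \
			  "RTNL: assertion failed at %s (%d)\n", __FILE__, __LINE__)

So this change makes every caller of ib_device_get_netdev() responsible
for holding rtnl itself, which is why auditing all the call sites matters.
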
> @@ -2306,12 +2312,15 @@ void ib_enum_roce_netdev(struct ib_device *ib_dev,
>
> rdma_for_each_port (ib_dev, port)
> if (rdma_protocol_roce(ib_dev, port)) {
> - struct net_device *idev =
> - ib_device_get_netdev(ib_dev, port);
> + struct net_device *idev;
> +
> + rtnl_lock();
> + idev = ib_device_get_netdev(ib_dev, port);
>
> if (filter(ib_dev, port, idev, filter_cookie))
> cb(ib_dev, port, idev, cookie);
>
> + rtnl_unlock();
> if (idev)
> dev_put(idev);
> }
> diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
> index 4900a0848124..cfa204a224f2 100644
> --- a/drivers/infiniband/core/nldev.c
> +++ b/drivers/infiniband/core/nldev.c
> @@ -360,6 +360,7 @@ static int fill_port_info(struct sk_buff *msg,
> if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_PHYS_STATE, attr.phys_state))
> return -EMSGSIZE;
>
> + rtnl_lock();
I am guessing rtnl is already held here.
Please double-check all the paths where you are adding rtnl_lock() to
make sure this is not going to deadlock.
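
A minimal sketch of the failure mode, assuming the guess above is right
(i.e. the nldev path reaches fill_port_info() with rtnl already held;
I have not traced the call chain):

	rtnl_lock();		/* taken somewhere up the call chain (assumed) */
	...
	fill_port_info()
		rtnl_lock();	/* added by this patch; rtnl_lock() is a plain
				 * mutex, not recursive, so the task deadlocks
				 * against itself */
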
> netdev = ib_device_get_netdev(device, port);
> if (netdev && net_eq(dev_net(netdev), net)) {
> ret = nla_put_u32(msg,
> @@ -371,6 +372,7 @@ static int fill_port_info(struct sk_buff *msg,
> }
>
>
Please wait one day before sending a new version, thanks.