Message-ID: <CAOi1vP8X+aQXPMHYE6J=GvmE4_EiM8r6fb_Fx0Lw=-9XPKhvGQ@mail.gmail.com>
Date: Sun, 7 Dec 2025 18:02:14 +0100
From: Ilya Dryomov <idryomov@...il.com>
To: Haoxiang Li <lihaoxiang@...c.iscas.ac.cn>
Cc: xiubli@...hat.com, zyan@...hat.com, ceph-devel@...r.kernel.org, 
	linux-kernel@...r.kernel.org, stable@...r.kernel.org
Subject: Re: [PATCH] ceph: Drop the string reference in __ceph_pool_perm_get()

On Sat, Dec 6, 2025 at 8:29 AM Haoxiang Li <lihaoxiang@...c.iscas.ac.cn> wrote:
>
> After calling ceph_get_string(), ceph_put_string() is required
> to drop the reference in error paths.

Hi Haoxiang,

I think the reference is transferred to the OSD request (i.e. rd_req).  It's
dropped when the OSD request is destroyed in ceph_osdc_release_request(),
via target_destroy() and ceph_oloc_destroy().  Do you see evidence to the
contrary?
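
For reference, the teardown path I mean looks roughly like this
(paraphrased from net/ceph/osd_client.c and net/ceph/osdmap.c -- a
sketch from memory, not the exact code):

    /* ceph_osdc_release_request() -> target_destroy(&req->r_t) */
    static void target_destroy(struct ceph_osd_request_target *t)
    {
            ceph_oloc_destroy(&t->base_oloc);   /* rd_req->r_base_oloc lives here */
            ceph_oloc_destroy(&t->target_oloc);
    }

    void ceph_oloc_destroy(struct ceph_object_locator *oloc)
    {
            /* drops the pool_ns reference taken with ceph_get_string() */
            ceph_put_string(oloc->pool_ns);
    }

If that's right, an explicit ceph_put_string(rd_req->r_base_oloc.pool_ns)
in the error paths would drop the same reference a second time when the
request is eventually freed.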

Thanks,

                Ilya

>
> Fixes: 779fe0fb8e18 ("ceph: rados pool namespace support")
> Cc: stable@...r.kernel.org
> Signed-off-by: Haoxiang Li <lihaoxiang@...c.iscas.ac.cn>
> ---
>  fs/ceph/addr.c | 16 +++++++++-------
>  1 file changed, 9 insertions(+), 7 deletions(-)
>
> diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
> index 322ed268f14a..690a54b4c316 100644
> --- a/fs/ceph/addr.c
> +++ b/fs/ceph/addr.c
> @@ -2440,13 +2440,13 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
>
>         err = ceph_osdc_alloc_messages(rd_req, GFP_NOFS);
>         if (err)
> -               goto out_unlock;
> +               goto put_string;
>
>         wr_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL,
>                                          1, false, GFP_NOFS);
>         if (!wr_req) {
>                 err = -ENOMEM;
> -               goto out_unlock;
> +               goto put_string;
>         }
>
>         wr_req->r_flags = CEPH_OSD_FLAG_WRITE;
> @@ -2456,13 +2456,13 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
>
>         err = ceph_osdc_alloc_messages(wr_req, GFP_NOFS);
>         if (err)
> -               goto out_unlock;
> +               goto put_string;
>
>         /* one page should be large enough for STAT data */
>         pages = ceph_alloc_page_vector(1, GFP_KERNEL);
>         if (IS_ERR(pages)) {
>                 err = PTR_ERR(pages);
> -               goto out_unlock;
> +               goto put_string;
>         }
>
>         osd_req_op_raw_data_in_pages(rd_req, 0, pages, PAGE_SIZE,
> @@ -2480,7 +2480,7 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
>         else if (err != -EPERM) {
>                 if (err == -EBLOCKLISTED)
>                         fsc->blocklisted = true;
> -               goto out_unlock;
> +               goto put_string;
>         }
>
>         if (err2 == 0 || err2 == -EEXIST)
> @@ -2489,14 +2489,14 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
>                 if (err2 == -EBLOCKLISTED)
>                         fsc->blocklisted = true;
>                 err = err2;
> -               goto out_unlock;
> +               goto put_string;
>         }
>
>         pool_ns_len = pool_ns ? pool_ns->len : 0;
>         perm = kmalloc(struct_size(perm, pool_ns, pool_ns_len + 1), GFP_NOFS);
>         if (!perm) {
>                 err = -ENOMEM;
> -               goto out_unlock;
> +               goto put_string;
>         }
>
>         perm->pool = pool;
> @@ -2509,6 +2509,8 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
>         rb_link_node(&perm->node, parent, p);
>         rb_insert_color(&perm->node, &mdsc->pool_perm_tree);
>         err = 0;
> +put_string:
> +       ceph_put_string(rd_req->r_base_oloc.pool_ns);
>  out_unlock:
>         up_write(&mdsc->pool_perm_rwsem);
>
> --
> 2.25.1
>
