Date: Tue, 11 Jun 2024 10:11:44 +0800
From: Xiubo Li <xiubli@...hat.com>
To: Max Kellermann <max.kellermann@...os.com>, idryomov@...il.com,
 ceph-devel@...r.kernel.org
Cc: linux-kernel@...r.kernel.org
Subject: Re: [PATCH] fs/ceph/mds_client: use cap_wait_list only if debugfs is
 enabled


On 6/7/24 00:41, Max Kellermann wrote:
> Only debugfs uses this list.  By omitting it, we save some memory and
> reduce lock contention on `caps_list_lock`.
>
> Signed-off-by: Max Kellermann <max.kellermann@...os.com>
> ---
>   fs/ceph/caps.c       | 6 ++++++
>   fs/ceph/mds_client.c | 2 ++
>   fs/ceph/mds_client.h | 6 ++++++
>   3 files changed, 14 insertions(+)
>
> diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
> index c4941ba245ac..772879aa26ee 100644
> --- a/fs/ceph/caps.c
> +++ b/fs/ceph/caps.c
> @@ -3067,10 +3067,13 @@ int __ceph_get_caps(struct inode *inode, struct ceph_file_info *fi, int need,
>   				       flags, &_got);
>   		WARN_ON_ONCE(ret == -EAGAIN);
>   		if (!ret) {
> +#ifdef CONFIG_DEBUG_FS
>   			struct ceph_mds_client *mdsc = fsc->mdsc;
>   			struct cap_wait cw;
> +#endif
>   			DEFINE_WAIT_FUNC(wait, woken_wake_function);
>   
> +#ifdef CONFIG_DEBUG_FS
>   			cw.ino = ceph_ino(inode);
>   			cw.tgid = current->tgid;
>   			cw.need = need;
> @@ -3079,6 +3082,7 @@ int __ceph_get_caps(struct inode *inode, struct ceph_file_info *fi, int need,
>   			spin_lock(&mdsc->caps_list_lock);
>   			list_add(&cw.list, &mdsc->cap_wait_list);
>   			spin_unlock(&mdsc->caps_list_lock);
> +#endif // CONFIG_DEBUG_FS
>   
>   			/* make sure used fmode not timeout */
>   			ceph_get_fmode(ci, flags, FMODE_WAIT_BIAS);
> @@ -3097,9 +3101,11 @@ int __ceph_get_caps(struct inode *inode, struct ceph_file_info *fi, int need,
>   			remove_wait_queue(&ci->i_cap_wq, &wait);
>   			ceph_put_fmode(ci, flags, FMODE_WAIT_BIAS);
>   
> +#ifdef CONFIG_DEBUG_FS
>   			spin_lock(&mdsc->caps_list_lock);
>   			list_del(&cw.list);
>   			spin_unlock(&mdsc->caps_list_lock);
> +#endif
>   
>   			if (ret == -EAGAIN)
>   				continue;
> diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
> index c2157f6e0c69..62238f3e6e19 100644
> --- a/fs/ceph/mds_client.c
> +++ b/fs/ceph/mds_client.c
> @@ -5505,7 +5505,9 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
>   	INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
>   	mdsc->last_renew_caps = jiffies;
>   	INIT_LIST_HEAD(&mdsc->cap_delay_list);
> +#ifdef CONFIG_DEBUG_FS
>   	INIT_LIST_HEAD(&mdsc->cap_wait_list);
> +#endif
>   	spin_lock_init(&mdsc->cap_delay_lock);
>   	INIT_LIST_HEAD(&mdsc->cap_unlink_delay_list);
>   	INIT_LIST_HEAD(&mdsc->snap_flush_list);
> diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
> index cfa18cf915a0..13dd83f783ec 100644
> --- a/fs/ceph/mds_client.h
> +++ b/fs/ceph/mds_client.h
> @@ -416,6 +416,8 @@ struct ceph_quotarealm_inode {
>   	struct inode *inode;
>   };
>   
> +#ifdef CONFIG_DEBUG_FS
> +
>   struct cap_wait {
>   	struct list_head	list;
>   	u64			ino;
> @@ -424,6 +426,8 @@ struct cap_wait {
>   	int			want;
>   };
>   
> +#endif // CONFIG_DEBUG_FS
> +
>   enum {
>   	CEPH_MDSC_STOPPING_BEGIN = 1,
>   	CEPH_MDSC_STOPPING_FLUSHING = 2,
> @@ -512,7 +516,9 @@ struct ceph_mds_client {
>   	spinlock_t	caps_list_lock;
>   	struct		list_head caps_list; /* unused (reserved or
>   						unreserved) */
> +#ifdef CONFIG_DEBUG_FS
>   	struct		list_head cap_wait_list;
> +#endif
>   	int		caps_total_count;    /* total caps allocated */
>   	int		caps_use_count;      /* in use */
>   	int		caps_use_max;	     /* max used caps */
Reviewed-by: Xiubo Li <xiubli@...hat.com>
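
[Editor's note] For context on the claim in the commit message that only debugfs uses this list: the sole reader of mdsc->cap_wait_list is the ceph debugfs "caps" file, which is why the list and its updates can be compiled out when CONFIG_DEBUG_FS is off. Below is a minimal sketch of such a reader, assuming a seq_file show callback in the style of fs/ceph/debugfs.c; the function name, header line, and format strings are illustrative, not the exact upstream code.

#ifdef CONFIG_DEBUG_FS
/*
 * Illustrative sketch (would live next to the other show callbacks in
 * fs/ceph/debugfs.c, with the usual includes): walk cap_wait_list under
 * caps_list_lock, the same lock the waiter in __ceph_get_caps() takes
 * when it adds and removes its on-stack struct cap_wait entry.
 */
static int cap_waiters_show(struct seq_file *s, void *p)
{
	struct ceph_fs_client *fsc = s->private;
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct cap_wait *cw;

	seq_puts(s, "tgid         ino                need   want\n");

	spin_lock(&mdsc->caps_list_lock);
	list_for_each_entry(cw, &mdsc->cap_wait_list, list)
		seq_printf(s, "%-13d0x%-17llx%-6s %-6s\n",
			   cw->tgid, cw->ino,
			   ceph_cap_string(cw->need),
			   ceph_cap_string(cw->want));
	spin_unlock(&mdsc->caps_list_lock);

	return 0;
}
#endif /* CONFIG_DEBUG_FS */

Since every entry on the list is an on-stack struct cap_wait owned by a waiting task, compiling the list out removes both the per-wait list manipulation and the corresponding acquisitions of caps_list_lock on non-debug builds, which is the memory and contention saving the patch describes.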