[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <fc02b2d29ecb7e277d48afb340b01cdc684e2e5a.camel@kernel.org>
Date: Fri, 01 Apr 2022 11:23:52 -0400
From: Jeff Layton <jlayton@...nel.org>
To: Jakob Koschel <jakobkoschel@...il.com>
Cc: Ilya Dryomov <idryomov@...il.com>, ceph-devel@...r.kernel.org,
linux-kernel@...r.kernel.org, Mike Rapoport <rppt@...nel.org>,
Brian Johannesmeyer <bjohannesmeyer@...il.com>,
Cristiano Giuffrida <c.giuffrida@...nl>,
"Bos, H.J." <h.j.bos@...nl>
Subject: Re: [PATCH 2/2] ceph: replace usage of found with dedicated list
iterator variable
On Thu, 2022-03-31 at 23:53 +0200, Jakob Koschel wrote:
> To move the list iterator variable into the list_for_each_entry_*()
> macro in the future, the list iterator variable should not be used
> after the loop body.
>
> To *never* use the list iterator variable after the loop, it was
> concluded that a separate iterator variable should be used instead of
> a found boolean [1].
>
> This removes the need for a found variable: simply checking whether
> the iterator variable was set determines if the break/goto was hit.
>
> Link: https://lore.kernel.org/all/CAHk-=wgRr_D8CB-D9Kg-c=EHreAsk5SqXPwr9Y7k9sA6cWXJ6w@mail.gmail.com/ [1]
> Signed-off-by: Jakob Koschel <jakobkoschel@...il.com>
> ---
> fs/ceph/caps.c | 32 +++++++++++++++-----------------
> 1 file changed, 15 insertions(+), 17 deletions(-)
>
> diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
> index 519750bc5a1a..70f9c5ffa9b7 100644
> --- a/fs/ceph/caps.c
> +++ b/fs/ceph/caps.c
> @@ -3179,10 +3179,9 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
> struct ceph_snap_context *snapc)
> {
> struct inode *inode = &ci->vfs_inode;
> - struct ceph_cap_snap *capsnap = NULL;
> + struct ceph_cap_snap *capsnap = NULL, *iter;
> int put = 0;
> bool last = false;
> - bool found = false;
> bool flush_snaps = false;
> bool complete_capsnap = false;
>
> @@ -3209,14 +3208,14 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
> ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
> last ? " LAST" : "");
> } else {
> - list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
> - if (capsnap->context == snapc) {
> - found = true;
> + list_for_each_entry(iter, &ci->i_cap_snaps, ci_item) {
> + if (iter->context == snapc) {
> + capsnap = iter;
> break;
> }
> }
>
> - if (!found) {
> + if (!capsnap) {
> /*
> * The capsnap should already be removed when removing
> * auth cap in the case of a forced unmount.
> @@ -3766,8 +3765,7 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
> struct ceph_inode_info *ci = ceph_inode(inode);
> struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
> u64 follows = le64_to_cpu(m->snap_follows);
> - struct ceph_cap_snap *capsnap;
> - bool flushed = false;
> + struct ceph_cap_snap *capsnap = NULL, *iter;
> bool wake_ci = false;
> bool wake_mdsc = false;
>
> @@ -3775,26 +3773,26 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
> inode, ci, session->s_mds, follows);
>
> spin_lock(&ci->i_ceph_lock);
> - list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
> - if (capsnap->follows == follows) {
> - if (capsnap->cap_flush.tid != flush_tid) {
> + list_for_each_entry(iter, &ci->i_cap_snaps, ci_item) {
> + if (iter->follows == follows) {
> + if (iter->cap_flush.tid != flush_tid) {
> dout(" cap_snap %p follows %lld tid %lld !="
> - " %lld\n", capsnap, follows,
> - flush_tid, capsnap->cap_flush.tid);
> + " %lld\n", iter, follows,
> + flush_tid, iter->cap_flush.tid);
> break;
> }
> - flushed = true;
> + capsnap = iter;
> break;
> } else {
> dout(" skipping cap_snap %p follows %lld\n",
> - capsnap, capsnap->follows);
> + iter, iter->follows);
> }
> }
> - if (flushed)
> + if (capsnap)
> ceph_remove_capsnap(inode, capsnap, &wake_ci, &wake_mdsc);
> spin_unlock(&ci->i_ceph_lock);
>
> - if (flushed) {
> + if (capsnap) {
> ceph_put_snap_context(capsnap->context);
> ceph_put_cap_snap(capsnap);
> if (wake_ci)
Thanks Jakob,
Both patches look fine. Applied to the ceph-client/testing branch. This
should make v5.19 if there are no issues.
Thanks!
--
Jeff Layton <jlayton@...nel.org>
Powered by blists - more mailing lists