[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <625acc9e-b871-4912-965e-82fe3f9228d7@linux.alibaba.com>
Date: Mon, 6 May 2024 11:09:06 +0800
From: Jingbo Xu <jefflexu@...ux.alibaba.com>
To: libaokun@...weicloud.com, netfs@...ts.linux.dev
Cc: dhowells@...hat.com, jlayton@...nel.org, zhujia.zj@...edance.com,
linux-erofs@...ts.ozlabs.org, linux-fsdevel@...r.kernel.org,
linux-kernel@...r.kernel.org, Baokun Li <libaokun1@...wei.com>
Subject: Re: [PATCH 08/12] cachefiles: never get a new anon fd if ondemand_id
is valid
On 4/24/24 11:39 AM, libaokun@...weicloud.com wrote:
> From: Baokun Li <libaokun1@...wei.com>
>
> Now every time the daemon reads an open request, it requests a new anon fd
> and ondemand_id. With the introduction of "restore", it is possible to read
> the same open request more than once, and therefore have multiple anon fd's
> for the same object.
>
> To avoid this, allocate a new anon fd only if no anon fd has been allocated
> (ondemand_id == 0) or if the previously allocated anon fd has been closed
> (ondemand_id == -1). Return an error if ondemand_id is valid, letting the
> daemon know that the current userland restore logic is abnormal and needs
> to be checked.
I have no obvious preference on whether this is strengthened on the kernel
side or not. Could you explain more about what will happen if the daemon
gets several distinct anon fds corresponding to the same object? IMHO the
daemon should expect the side effect if it issues a 'restore' command
when the daemon hasn't crashed. IOW, it's something that shall be fixed
or managed either on the kernel side or on the daemon side.
> ---
> fs/cachefiles/ondemand.c | 34 ++++++++++++++++++++++++++++------
> 1 file changed, 28 insertions(+), 6 deletions(-)
>
> diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c
> index b5e6a851ef04..0cf63bfedc9e 100644
> --- a/fs/cachefiles/ondemand.c
> +++ b/fs/cachefiles/ondemand.c
> @@ -14,11 +14,18 @@ static int cachefiles_ondemand_fd_release(struct inode *inode,
> struct file *file)
> {
> struct cachefiles_object *object = file->private_data;
> - struct cachefiles_cache *cache = object->volume->cache;
> - struct cachefiles_ondemand_info *info = object->ondemand;
> + struct cachefiles_cache *cache;
> + struct cachefiles_ondemand_info *info;
> int object_id;
> struct cachefiles_req *req;
> - XA_STATE(xas, &cache->reqs, 0);
> + XA_STATE(xas, NULL, 0);
> +
> + if (!object)
> + return 0;
> +
> + info = object->ondemand;
> + cache = object->volume->cache;
> + xas.xa = &cache->reqs;
>
> xa_lock(&cache->reqs);
> spin_lock(&info->lock);
> @@ -269,22 +276,39 @@ static int cachefiles_ondemand_get_fd(struct cachefiles_req *req)
> goto err_put_fd;
> }
>
> + spin_lock(&object->ondemand->lock);
> + if (object->ondemand->ondemand_id > 0) {
> + spin_unlock(&object->ondemand->lock);
> + ret = -EEXIST;
> + /* Avoid performing cachefiles_ondemand_fd_release(). */
> + file->private_data = NULL;
> + goto err_put_file;
> + }
> +
> file->f_mode |= FMODE_PWRITE | FMODE_LSEEK;
> fd_install(fd, file);
>
> load = (void *)req->msg.data;
> load->fd = fd;
> object->ondemand->ondemand_id = object_id;
> + spin_unlock(&object->ondemand->lock);
>
> cachefiles_get_unbind_pincount(cache);
> trace_cachefiles_ondemand_open(object, &req->msg, load);
> return 0;
>
> +err_put_file:
> + fput(file);
> err_put_fd:
> put_unused_fd(fd);
> err_free_id:
> xa_erase(&cache->ondemand_ids, object_id);
> err:
> + spin_lock(&object->ondemand->lock);
> + /* Avoid marking an opened object as closed. */
> + if (object->ondemand->ondemand_id <= 0)
> + cachefiles_ondemand_set_object_close(object);
> + spin_unlock(&object->ondemand->lock);
> cachefiles_put_object(object, cachefiles_obj_put_ondemand_fd);
> return ret;
> }
> @@ -367,10 +391,8 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
>
> if (msg->opcode == CACHEFILES_OP_OPEN) {
> ret = cachefiles_ondemand_get_fd(req);
> - if (ret) {
> - cachefiles_ondemand_set_object_close(req->object);
> + if (ret)
> goto out;
> - }
> }
>
> msg->msg_id = xas.xa_index;
--
Thanks,
Jingbo
Powered by blists - more mailing lists