Message-Id: <20180830172423.21964-3-jlayton@kernel.org>
Date: Thu, 30 Aug 2018 13:24:22 -0400
From: Jeff Layton <jlayton@...nel.org>
To: Alexander Viro <viro@...iv.linux.org.uk>
Cc: "Eric W. Biederman" <ebiederm@...ssion.com>,
Daniel P. Berrangé <berrange@...hat.com>,
linux-fsdevel@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: [PATCH v2 2/3] exec: delay clone(CLONE_FILES) if task associated with current files_struct is exec'ing

In a later patch, we're going to move the unshare_files call in
__do_execve_file to later in the process, but this opens up a potential
race with clone(CLONE_FILES). We could end up bumping the refcount on
the files_struct after we've checked to see if it was shared. What we
really want to do in that case is have the clone return -EAGAIN, much
like the handling of the similar situation in copy_fs.

Add a new in_exec boolean to files_struct that is protected by the
file_lock. Set it if the files_struct turns out not to be shared, and
clear it at the end of __do_execve_file. If it's set when we go to
copy_process, then just return -EAGAIN.
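
For illustration only (not part of this patch): a userspace caller that
shares the descriptor table via clone(CLONE_FILES) would see the new
-EAGAIN while a sharing task is exec'ing and can simply retry, much as
CLONE_FS callers already do. A minimal sketch follows; the helper name,
stack size, and retry policy are made up for the example.

/*
 * Illustrative only -- not part of the patch.  spawn_with_shared_files()
 * is a made-up helper showing how a caller might handle the proposed
 * -EAGAIN from clone(CLONE_FILES).
 */
#define _GNU_SOURCE
#include <sched.h>
#include <errno.h>
#include <signal.h>
#include <stdlib.h>
#include <unistd.h>

static int child_fn(void *arg)
{
	/* Runs sharing the parent's file descriptor table. */
	return 0;
}

static pid_t spawn_with_shared_files(void)
{
	const size_t stack_sz = 64 * 1024;
	char *stack = malloc(stack_sz);
	pid_t pid;

	if (!stack)
		return -1;

	do {
		/* clone() takes the top of the child stack on most arches. */
		pid = clone(child_fn, stack + stack_sz,
			    CLONE_FILES | SIGCHLD, NULL);
		/* With this series, EAGAIN means a sharer is mid-exec. */
	} while (pid < 0 && errno == EAGAIN);

	return pid;
}
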
Cc: Eric W. Biederman <ebiederm@...ssion.com>
Signed-off-by: Jeff Layton <jlayton@...nel.org>
---
fs/exec.c | 14 ++++++++++++--
fs/file.c | 1 +
include/linux/fdtable.h | 1 +
kernel/fork.c | 21 ++++++++++++++-------
4 files changed, 28 insertions(+), 9 deletions(-)
diff --git a/fs/exec.c b/fs/exec.c
index 1ebf6e5a521d..ca25f805ebad 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1831,8 +1831,13 @@ static int __do_execve_file(int fd, struct filename *filename,
kfree(pathbuf);
if (filename)
putname(filename);
- if (displaced)
+ if (displaced) {
put_files_struct(displaced);
+ } else {
+ spin_lock(&current->files->file_lock);
+ current->files->in_exec = false;
+ spin_unlock(&current->files->file_lock);
+ }
return retval;
out:
@@ -1850,8 +1855,13 @@ static int __do_execve_file(int fd, struct filename *filename,
kfree(pathbuf);
out_files:
- if (displaced)
+ if (displaced) {
reset_files_struct(displaced);
+ } else {
+ spin_lock(&current->files->file_lock);
+ current->files->in_exec = false;
+ spin_unlock(&current->files->file_lock);
+ }
out_ret:
if (filename)
putname(filename);
diff --git a/fs/file.c b/fs/file.c
index 42e0c8448b20..dc5de1ec5395 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -286,6 +286,7 @@ struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
spin_lock_init(&newf->file_lock);
newf->thread_count = 1;
newf->resize_in_progress = false;
+ newf->in_exec = false;
init_waitqueue_head(&newf->resize_wait);
newf->next_fd = 0;
new_fdt = &newf->fdtab;
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index 213ec1430ba4..310454db9049 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -51,6 +51,7 @@ struct files_struct {
*/
atomic_t count;
bool resize_in_progress;
+ bool in_exec;
wait_queue_head_t resize_wait;
struct fdtable __rcu *fdt;
diff --git a/kernel/fork.c b/kernel/fork.c
index 82799544b646..d296dc501c0c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1373,10 +1373,15 @@ static int copy_files(unsigned long clone_flags, struct task_struct *tsk)
goto out;
if (clone_flags & CLONE_FILES) {
- atomic_inc(&oldf->count);
spin_lock(&oldf->file_lock);
- oldf->thread_count++;
- spin_unlock(&oldf->file_lock);
+ if (oldf->in_exec) {
+ spin_unlock(&oldf->file_lock);
+ error = -EAGAIN;
+ } else {
+ oldf->thread_count++;
+ spin_unlock(&oldf->file_lock);
+ atomic_inc(&oldf->count);
+ }
goto out;
}
@@ -2420,18 +2425,20 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
{
struct files_struct *fd = current->files;
- int error, count;
+ int error;
if (!(unshare_flags & CLONE_FILES) || !fd)
return 0;
spin_lock(&fd->file_lock);
- count = fd->thread_count;
- spin_unlock(&fd->file_lock);
- if (count > 1) {
+ if (fd->thread_count > 1) {
+ spin_unlock(&fd->file_lock);
*new_fdp = dup_fd(fd, &error);
if (!*new_fdp)
return error;
+ } else {
+ fd->in_exec = true;
+ spin_unlock(&fd->file_lock);
}
return 0;
--
2.17.1