Message-ID: <Pine.LNX.4.64.0903282320271.15432@blonde.anvils>
Date: Sat, 28 Mar 2009 23:21:27 +0000 (GMT)
From: Hugh Dickins <hugh@...itas.com>
To: Al Viro <viro@...iv.linux.org.uk>
cc: Linus Torvalds <torvalds@...ux-foundation.org>,
Andrew Morton <akpm@...ux-foundation.org>,
Joe Malicki <jmalicki@...acarta.com>,
Michael Itz <mitz@...acarta.com>,
Kenneth Baker <bakerk@...acarta.com>,
Chris Wright <chrisw@...s-sol.org>,
David Howells <dhowells@...hat.com>,
Alexey Dobriyan <adobriyan@...il.com>,
Oleg Nesterov <oleg@...hat.com>,
Greg Kroah-Hartman <gregkh@...e.de>,
linux-fsdevel@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: [PATCH 3/4] fix setuid sometimes wouldn't
check_unsafe_exec() also notes whether the fs_struct is being
shared by more threads than will get killed by the exec, and if so
sets LSM_UNSAFE_SHARE to make bprm_set_creds() careful about euid.
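
For reference, the sharing check in 2.6.29's check_unsafe_exec() looks
roughly like this (abbreviated sketch, files and sighand elided; not the
exact source):

	unsigned n_fs = 1;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		if (t->fs == p->fs)
			n_fs++;
	}
	/* any reference beyond those threads looks like foul play */
	if (atomic_read(&p->fs->count) > n_fs)
		bprm->unsafe |= LSM_UNSAFE_SHARE;
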
But /proc/<pid>/cwd and /proc/<pid>/root lookups make transient
use of get_fs_struct(), which also raises that sharing count.
This might occasionally cause a setuid program not to change euid,
in the same way as happened with files->count (check_unsafe_exec
also looks at sighand->count, but /proc doesn't raise that one).
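
Schematically, the losing interleaving is (illustrative timeline, not
literal source):

	exec: check_unsafe_exec()        /proc/<pid>/cwd lookup
	-------------------------        ----------------------
	counts threads sharing fs:
	n_fs == 1
	                                 get_fs_struct():
	                                   task_lock(task);
	                                   atomic_inc(&fs->count);
	                                   task_unlock(task);
	fs->count == 2 > n_fs
	=> LSM_UNSAFE_SHARE
	=> euid left unchanged           ...
	                                 put_fs_struct(fs);
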
We'd prefer exec not to have to unshare fs_struct to dodge such
transients: so fix this in procfs instead, replacing get_fs_struct()
by get_fs_path(), which does path_get on the pwd or root path while
still holding task_lock, rather than raising fs->count.
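
For illustration, a caller of the new helper would look something like
this (hypothetical caller, not part of the patch): it gets a stable copy
of pwd or root, pinning only the vfsmount and dentry, and drops it with
path_put() when done, so fs->count itself never moves.

	struct path cwd;

	if (get_fs_path(task, &cwd, 0) == 0) {	/* 0 selects pwd, 1 root */
		/* use cwd.mnt and cwd.dentry ... */
		path_put(&cwd);
	}
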
Signed-off-by: Hugh Dickins <hugh@...itas.com>
Cc: stable@...nel.org
---
fs/proc/base.c | 50 +++++++++++++++--------------------------------
1 file changed, 16 insertions(+), 34 deletions(-)
--- 2.6.29/fs/proc/base.c 2009-03-23 23:12:14.000000000 +0000
+++ linux/fs/proc/base.c 2009-03-28 18:06:02.000000000 +0000
@@ -146,15 +146,22 @@ static unsigned int pid_entry_count_dirs
 	return count;
 }
 
-static struct fs_struct *get_fs_struct(struct task_struct *task)
+static int get_fs_path(struct task_struct *task, struct path *path, bool root)
 {
 	struct fs_struct *fs;
+	int result = -ENOENT;
+
 	task_lock(task);
 	fs = task->fs;
-	if(fs)
-		atomic_inc(&fs->count);
+	if (fs) {
+		read_lock(&fs->lock);
+		*path = root ? fs->root : fs->pwd;
+		path_get(path);
+		read_unlock(&fs->lock);
+		result = 0;
+	}
 	task_unlock(task);
-	return fs;
+	return result;
 }
 
 static int get_nr_threads(struct task_struct *tsk)
@@ -172,42 +179,24 @@ static int get_nr_threads(struct task_st
 static int proc_cwd_link(struct inode *inode, struct path *path)
 {
 	struct task_struct *task = get_proc_task(inode);
-	struct fs_struct *fs = NULL;
 	int result = -ENOENT;
 
 	if (task) {
-		fs = get_fs_struct(task);
+		result = get_fs_path(task, path, 0);
 		put_task_struct(task);
 	}
-	if (fs) {
-		read_lock(&fs->lock);
-		*path = fs->pwd;
-		path_get(&fs->pwd);
-		read_unlock(&fs->lock);
-		result = 0;
-		put_fs_struct(fs);
-	}
 	return result;
 }
 
 static int proc_root_link(struct inode *inode, struct path *path)
 {
 	struct task_struct *task = get_proc_task(inode);
-	struct fs_struct *fs = NULL;
 	int result = -ENOENT;
 
 	if (task) {
-		fs = get_fs_struct(task);
+		result = get_fs_path(task, path, 1);
 		put_task_struct(task);
 	}
-	if (fs) {
-		read_lock(&fs->lock);
-		*path = fs->root;
-		path_get(&fs->root);
-		read_unlock(&fs->lock);
-		result = 0;
-		put_fs_struct(fs);
-	}
 	return result;
 }
 
@@ -596,7 +585,6 @@ static int mounts_open_common(struct ino
 	struct task_struct *task = get_proc_task(inode);
 	struct nsproxy *nsp;
 	struct mnt_namespace *ns = NULL;
-	struct fs_struct *fs = NULL;
 	struct path root;
 	struct proc_mounts *p;
 	int ret = -EINVAL;
@@ -610,22 +598,16 @@ static int mounts_open_common(struct ino
 			get_mnt_ns(ns);
 		}
 		rcu_read_unlock();
-		if (ns)
-			fs = get_fs_struct(task);
+		if (ns && get_fs_path(task, &root, 1) == 0)
+			ret = 0;
 		put_task_struct(task);
 	}
 
 	if (!ns)
 		goto err;
-	if (!fs)
+	if (ret)
 		goto err_put_ns;
 
-	read_lock(&fs->lock);
-	root = fs->root;
-	path_get(&root);
-	read_unlock(&fs->lock);
-	put_fs_struct(fs);
-
 	ret = -ENOMEM;
 	p = kmalloc(sizeof(struct proc_mounts), GFP_KERNEL);
 	if (!p)
--