Message-Id: <1503654092-15471-2-git-send-email-elena.reshetova@intel.com>
Date: Fri, 25 Aug 2017 12:41:18 +0300
From: Elena Reshetova <elena.reshetova@...el.com>
To: linux-kernel@...r.kernel.org
Cc: linux-fsdevel@...r.kernel.org, peterz@...radead.org,
gregkh@...uxfoundation.org, viro@...iv.linux.org.uk, tj@...nel.org,
mingo@...hat.com, hannes@...xchg.org, lizefan@...wei.com,
acme@...nel.org, alexander.shishkin@...ux.intel.com,
eparis@...hat.com, akpm@...ux-foundation.org, arnd@...db.de,
luto@...nel.org, keescook@...omium.org, tglx@...utronix.de,
dvhart@...radead.org, ebiederm@...ssion.com,
Elena Reshetova <elena.reshetova@...el.com>
Subject: [PATCH 01/15] sched: convert sighand_struct.count to refcount_t
atomic_t variables are currently used to implement reference
counters with the following properties:
- counter is initialized to 1 using atomic_set()
- a resource is freed upon counter reaching zero
- once counter reaches zero, its further
increments aren't allowed
- counter schema uses basic atomic operations
  (set, inc, inc_not_zero, dec_and_test, etc.);
  see the sketch below the list
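To make that schema concrete, here is a minimal sketch of the
atomic_t pattern on a hypothetical object; struct foo, alloc_foo(),
get_foo() and put_foo() are made-up names used only for illustration
and are not part of this patch:

  #include <linux/atomic.h>
  #include <linux/slab.h>

  struct foo {
          atomic_t count;                         /* reference counter */
  };

  static struct foo *alloc_foo(void)
  {
          struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

          if (f)
                  atomic_set(&f->count, 1);       /* counter starts at 1 */
          return f;
  }

  static void get_foo(struct foo *f)
  {
          atomic_inc(&f->count);                  /* take a reference */
  }

  static void put_foo(struct foo *f)
  {
          if (atomic_dec_and_test(&f->count))     /* last reference dropped */
                  kfree(f);                       /* free the resource */
  }

With plain atomic_t, nothing prevents the counter from wrapping
around or from being incremented again after it has already reached
zero.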
Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.
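Applied to the same hypothetical object as above, the conversion this
series performs looks roughly like the following sketch (again, the
names are illustrative only):

  #include <linux/refcount.h>
  #include <linux/slab.h>

  struct foo {
          refcount_t count;                       /* was atomic_t */
  };

  static struct foo *alloc_foo(void)
  {
          struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

          if (f)
                  refcount_set(&f->count, 1);     /* was atomic_set() */
          return f;
  }

  static void get_foo(struct foo *f)
  {
          refcount_inc(&f->count);                /* was atomic_inc() */
  }

  static void put_foo(struct foo *f)
  {
          if (refcount_dec_and_test(&f->count))   /* was atomic_dec_and_test() */
                  kfree(f);
  }

The refcount_* operations are designed to saturate and warn on
overflow and on increment-after-zero instead of silently wrapping.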
The variable sighand_struct.count is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.
Suggested-by: Kees Cook <keescook@...omium.org>
Reviewed-by: David Windsor <dwindsor@...il.com>
Reviewed-by: Hans Liljestrand <ishkamiel@...il.com>
Signed-off-by: Elena Reshetova <elena.reshetova@...el.com>
---
fs/exec.c | 4 ++--
fs/proc/task_nommu.c | 2 +-
include/linux/init_task.h | 2 +-
include/linux/sched/signal.h | 3 ++-
kernel/fork.c | 8 ++++----
5 files changed, 10 insertions(+), 9 deletions(-)
diff --git a/fs/exec.c b/fs/exec.c
index daa19d8..5c8960d 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1200,7 +1200,7 @@ static int de_thread(struct task_struct *tsk)
flush_itimer_signals();
#endif
- if (atomic_read(&oldsighand->count) != 1) {
+ if (refcount_read(&oldsighand->count) != 1) {
struct sighand_struct *newsighand;
/*
* This ->sighand is shared with the CLONE_SIGHAND
@@ -1210,7 +1210,7 @@ static int de_thread(struct task_struct *tsk)
if (!newsighand)
return -ENOMEM;
- atomic_set(&newsighand->count, 1);
+ refcount_set(&newsighand->count, 1);
memcpy(newsighand->action, oldsighand->action,
sizeof(newsighand->action));
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index 8652209..5038b6b 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -63,7 +63,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
else
bytes += kobjsize(current->files);
- if (current->sighand && atomic_read(&current->sighand->count) > 1)
+ if (current->sighand && refcount_read(&current->sighand->count) > 1)
sbytes += kobjsize(current->sighand);
else
bytes += kobjsize(current->sighand);
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 3c07ace..6c6f520 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -85,7 +85,7 @@ extern struct fs_struct init_fs;
extern struct nsproxy init_nsproxy;
#define INIT_SIGHAND(sighand) { \
- .count = ATOMIC_INIT(1), \
+ .count = REFCOUNT_INIT(1), \
.action = { { { .sa_handler = SIG_DFL, } }, }, \
.siglock = __SPIN_LOCK_UNLOCKED(sighand.siglock), \
.signalfd_wqh = __WAIT_QUEUE_HEAD_INITIALIZER(sighand.signalfd_wqh), \
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 2a0dd40..4d5cdf1 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -7,13 +7,14 @@
#include <linux/sched/jobctl.h>
#include <linux/sched/task.h>
#include <linux/cred.h>
+#include <linux/refcount.h>
/*
* Types defining task->signal and task->sighand and APIs using them:
*/
struct sighand_struct {
- atomic_t count;
+ refcount_t count;
struct k_sigaction action[_NSIG];
spinlock_t siglock;
wait_queue_head_t signalfd_wqh;
diff --git a/kernel/fork.c b/kernel/fork.c
index 8004fc7..43bcd20 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1320,7 +1320,7 @@ static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
struct sighand_struct *sig;
if (clone_flags & CLONE_SIGHAND) {
- atomic_inc(&current->sighand->count);
+ refcount_inc(&current->sighand->count);
return 0;
}
sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
@@ -1328,14 +1328,14 @@ static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
if (!sig)
return -ENOMEM;
- atomic_set(&sig->count, 1);
+ refcount_set(&sig->count, 1);
memcpy(sig->action, current->sighand->action, sizeof(sig->action));
return 0;
}
void __cleanup_sighand(struct sighand_struct *sighand)
{
- if (atomic_dec_and_test(&sighand->count)) {
+ if (refcount_dec_and_test(&sighand->count)) {
signalfd_cleanup(sighand);
/*
* sighand_cachep is SLAB_TYPESAFE_BY_RCU so we can free it
@@ -2236,7 +2236,7 @@ static int check_unshare_flags(unsigned long unshare_flags)
return -EINVAL;
}
if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) {
- if (atomic_read(&current->sighand->count) > 1)
+ if (refcount_read(&current->sighand->count) > 1)
return -EINVAL;
}
if (unshare_flags & CLONE_VM) {
--
2.7.4