Message-Id: <20180704144554.GK3593@linux.vnet.ibm.com>
Date: Wed, 4 Jul 2018 07:45:54 -0700
From: "Paul E. McKenney" <paulmck@...ux.vnet.ibm.com>
To: Manfred Spraul <manfred@...orfullife.com>
Cc: LKML <linux-kernel@...r.kernel.org>,
	Davidlohr Bueso <dave@...olabs.net>, 1vier1@....de,
	Andrew Morton <akpm@...ux-foundation.org>,
	Kees Cook <keescook@...omium.org>
Subject: Re: [RFC] ipc: refcounting / use after free?

On Wed, Jul 04, 2018 at 01:53:05PM +0200, Manfred Spraul wrote:
> The ipc code uses the equivalent of
>
>     rcu_read_lock();
>     kfree_rcu(a, rcu);
>     if (a->deleted) {
>         rcu_read_unlock();
>         return FAILURE;
>     }
>     <...>
>
> Is this safe, or is dereferencing "a" after having called call_rcu()
> a use-after-free?

This is safe because it is being done before the rcu_read_unlock().

> According to rcupdate.h, the kfree is only deferred until the
> other CPUs exit their critical sections:
>
> include/linux/rcupdate.h:
> > * Similarly, if call_rcu() is invoked
> > * on one CPU while other CPUs are within RCU read-side critical
> > * sections, invocation of the corresponding RCU callback is deferred
> > * until after the all the other CPUs exit their critical sections.

This is true, but so are the words that appear earlier in that comment:

 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all pre-existing RCU read-side
 * critical sections have completed.

In the above case, the kfree_rcu() is executed within a pre-existing
RCU read-side critical section, so the memory cannot be freed until
the rcu_read_unlock() is reached.
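
Spelled out against the pattern at the top of the thread (a minimal
sketch; "a", "deleted" and FAILURE are just the placeholders from the
example above):

	rcu_read_lock();	/* pre-existing read-side critical section */

	kfree_rcu(a, rcu);	/* the free is deferred until a full grace
				 * period elapses, and that grace period
				 * cannot complete while this critical
				 * section is still running */

	if (a->deleted) {	/* still safe: "a" cannot be freed before
				 * the rcu_read_unlock() below is reached */
		rcu_read_unlock();
		return FAILURE;
	}
	<...>
	rcu_read_unlock();	/* only after this can the grace period end
				 * and the callback free the memory */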

							Thanx, Paul

> <The patch is for review, not fully tested>
> ---
>  ipc/msg.c  | 11 ++++++++---
>  ipc/sem.c  | 42 ++++++++++++++++++++++++++++++------------
>  ipc/util.c | 35 ++++++++++++++++++++++++++++++++---
>  ipc/util.h | 18 ++++++++++++++++--
>  4 files changed, 86 insertions(+), 20 deletions(-)
>
> diff --git a/ipc/msg.c b/ipc/msg.c
> index 3b6545302598..724000c15296 100644
> --- a/ipc/msg.c
> +++ b/ipc/msg.c
> @@ -805,7 +805,7 @@ static long do_msgsnd(int msqid, long mtype, void __user *mtext,
>  	msq = msq_obtain_object_check(ns, msqid);
>  	if (IS_ERR(msq)) {
>  		err = PTR_ERR(msq);
> -		goto out_unlock1;
> +		goto out_unlock2;
>  	}
>  
>  	ipc_lock_object(&msq->q_perm);
> @@ -851,8 +851,12 @@ static long do_msgsnd(int msqid, long mtype, void __user *mtext,
>  		rcu_read_lock();
>  		ipc_lock_object(&msq->q_perm);
>  
> -		ipc_rcu_putref(&msq->q_perm, msg_rcu_free);
>  		/* raced with RMID? */
> +		if (!__ipc_rcu_putref(&msq->q_perm)) {
> +			ipc_unlock_object(&msq->q_perm);
> +			call_rcu(&msq->q_perm.rcu, msg_rcu_free);
> +			goto out_unlock1;
> +		}
>  		if (!ipc_valid_object(&msq->q_perm)) {
>  			err = -EIDRM;
>  			goto out_unlock0;
> @@ -883,8 +887,9 @@ static long do_msgsnd(int msqid, long mtype, void __user *mtext,
>  
>  out_unlock0:
>  	ipc_unlock_object(&msq->q_perm);
> -	wake_up_q(&wake_q);
>  out_unlock1:
> +	wake_up_q(&wake_q);
> +out_unlock2:
>  	rcu_read_unlock();
>  	if (msg != NULL)
>  		free_msg(msg);
> diff --git a/ipc/sem.c b/ipc/sem.c
> index 5af1943ad782..c269fae05b24 100644
> --- a/ipc/sem.c
> +++ b/ipc/sem.c
> @@ -475,10 +475,16 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns
>  	return container_of(ipcp, struct sem_array, sem_perm);
>  }
>  
> -static inline void sem_lock_and_putref(struct sem_array *sma)
> +static int __must_check sem_lock_and_putref(struct sem_array *sma)
>  {
>  	sem_lock(sma, NULL, -1);
> -	ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
> +
> +	if (!__ipc_rcu_putref(&sma->sem_perm)) {
> +		sem_unlock(sma, -1);
> +		call_rcu(&sma->sem_perm.rcu, sem_rcu_free);
> +		return 0;
> +	}
> +	return 1;
>  }
>  
>  static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
> @@ -1434,7 +1440,10 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
>  			}
>  
>  			rcu_read_lock();
> -			sem_lock_and_putref(sma);
> +			if (!sem_lock_and_putref(sma)) {
> +				goto out_rcu_wakeup;
> +			}
> +
>  			if (!ipc_valid_object(&sma->sem_perm)) {
>  				err = -EIDRM;
>  				goto out_unlock;
> @@ -1483,7 +1492,11 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
>  			}
>  		}
>  		rcu_read_lock();
> -		sem_lock_and_putref(sma);
> +		if (!sem_lock_and_putref(sma)) {
> +			err = -EIDRM;
> +			goto out_rcu_wakeup;
> +		}
> +
>  		if (!ipc_valid_object(&sma->sem_perm)) {
>  			err = -EIDRM;
>  			goto out_unlock;
> @@ -1898,14 +1911,12 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
>  
>  	/* step 3: Acquire the lock on semaphore array */
>  	rcu_read_lock();
> -	sem_lock_and_putref(sma);
> -	if (!ipc_valid_object(&sma->sem_perm)) {
> -		sem_unlock(sma, -1);
> -		rcu_read_unlock();
> -		kfree(new);
> -		un = ERR_PTR(-EIDRM);
> -		goto out;
> -	}
> +	if (!sem_lock_and_putref(sma))
> +		goto out_EIDRM_free;
> +
> +	if (!ipc_valid_object(&sma->sem_perm))
> +		goto out_EIDRM_unlock;
> +
>  	spin_lock(&ulp->lock);
>  
>  	/*
> @@ -1931,6 +1942,13 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
>  	sem_unlock(sma, -1);
>  out:
>  	return un;
> +
> +out_EIDRM_unlock:
> +	sem_unlock(sma, -1);
> +out_EIDRM_free:
> +	rcu_read_unlock();
> +	kfree(new);
> +	return ERR_PTR(-EIDRM);
>  }
>  
>  static long do_semtimedop(int semid, struct sembuf __user *tsops,
> diff --git a/ipc/util.c b/ipc/util.c
> index 4e81182fa0ac..ba7f96fc8a61 100644
> --- a/ipc/util.c
> +++ b/ipc/util.c
> @@ -462,18 +462,47 @@ void ipc_set_key_private(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
>  	ipcp->key = IPC_PRIVATE;
>  }
>  
> -int ipc_rcu_getref(struct kern_ipc_perm *ptr)
> +int __must_check ipc_rcu_getref(struct kern_ipc_perm *ptr)
>  {
>  	return refcount_inc_not_zero(&ptr->refcount);
>  }
>  
> -void ipc_rcu_putref(struct kern_ipc_perm *ptr,
> +/**
> + * __ipc_rcu_putref - reduce reference count of an ipc object.
> + * @ptr: ipc object
> + *
> + * The function reduces the reference count of an ipc object by 1.
> + * If the reference count drops to 0, then the function returns 0.
> + * If this happens, then the caller is responsible for triggering
> + * call_rcu() to free the ipc object.
> + */
> +int __must_check __ipc_rcu_putref(struct kern_ipc_perm *ptr)
> +{
> +	if (!refcount_dec_and_test(&ptr->refcount))
> +		return 1;
> +	return 0;
> +}
> +
> +/**
> + * ipc_rcu_putref - reduce reference count of an ipc object.
> + * @ptr: ipc object
> + * @func: cleanup function to call when the reference is reduced to 0.
> + *
> + * The function reduces the reference count of an ipc object by 1.
> + * If the count drops to 0, then a call_rcu is triggered to free the ipc
> + * object.
> + *
> + * If the reference count drops to 0, then the function returns 0.
> + * If this happens, then the caller must not dereference ptr.
> + */
> +int ipc_rcu_putref(struct kern_ipc_perm *ptr,
>  			void (*func)(struct rcu_head *head))
>  {
>  	if (!refcount_dec_and_test(&ptr->refcount))
> -		return;
> +		return 1;
>  
>  	call_rcu(&ptr->rcu, func);
> +	return 0;
>  }
>  
>  /**
> diff --git a/ipc/util.h b/ipc/util.h
> index 0aba3230d007..fbd55aeee933 100644
> --- a/ipc/util.h
> +++ b/ipc/util.h
> @@ -134,12 +134,26 @@ static inline int ipc_get_maxid(struct ipc_ids *ids)
>   * Objects are reference counted, they start with reference count 1.
>   * getref increases the refcount, the putref call that reduces the recount
>   * to 0 schedules the rcu destruction. Caller must guarantee locking.
> + * As the reference count is reduced without holding any lock, this means
> + * that the caller must use ipc_valid_object() after acquiring ipc_lock_xx().
>   *
>   * refcount is initialized by ipc_addid(), before that point call_rcu()
>   * must be used.
> + *
> + * Most callers must check the return codes:
> + * - ipc_rcu_getref() fails if the reference count is already 0.
> + * - ipc_rcu_putref() calls call_rcu() if the reference count drops to 0;
> + *   dereferencing the ipc object after the function returns 0 is a
> + *   use-after-free.
> + * - with __ipc_rcu_putref(), the caller is responsible for call_rcu().
> + *   This function is intended to be used if dereferencing must be done,
> + *   e.g. to drop a lock.
> + *   ipc_rcu_putref() does not use __must_check, because error paths or the
> + *   IPC_RMID codepaths can safely ignore the return code.
>   */
> -int ipc_rcu_getref(struct kern_ipc_perm *ptr);
> -void ipc_rcu_putref(struct kern_ipc_perm *ptr,
> +int __must_check ipc_rcu_getref(struct kern_ipc_perm *ptr);
> +int __must_check __ipc_rcu_putref(struct kern_ipc_perm *ptr);
> +int ipc_rcu_putref(struct kern_ipc_perm *ptr,
>  			void (*func)(struct rcu_head *head));
>  
>  struct kern_ipc_perm *ipc_lock(struct ipc_ids *, int);
> --
> 2.14.4
>
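
Taken together, the calling convention documented in the ipc/util.h
comment above would look roughly like this on the caller side (a
minimal sketch, not taken verbatim from the patch; "perm" stands for
the embedded struct kern_ipc_perm and free_fn for the object's RCU
callback, e.g. msg_rcu_free):

	/* rcu_read_lock() and ipc_lock_object(perm) are held here */

	if (!ipc_rcu_getref(perm)) {
		/* count already 0: the object is on its way out */
		ipc_unlock_object(perm);
		rcu_read_unlock();
		return -EIDRM;
	}

	ipc_unlock_object(perm);
	rcu_read_unlock();

	/* ... sleep or allocate with no locks held ... */

	rcu_read_lock();
	ipc_lock_object(perm);

	if (!__ipc_rcu_putref(perm)) {
		/* We dropped the last reference (raced with RMID):
		 * unlock, hand the object to RCU for freeing, and do
		 * not dereference it afterwards.
		 */
		ipc_unlock_object(perm);
		call_rcu(&perm->rcu, free_fn);
		rcu_read_unlock();
		return -EIDRM;
	}

	if (!ipc_valid_object(perm)) {
		/* Marked deleted by RMID, but looking at the object is
		 * still safe here: we hold rcu_read_lock(), so any
		 * call_rcu()-scheduled free is deferred (see above).
		 */
		ipc_unlock_object(perm);
		rcu_read_unlock();
		return -EIDRM;
	}

	/* normal path: object is valid and its lock is still held */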