Message-ID: <1262644428.6469.7.camel@wall-e>
Date: Mon, 04 Jan 2010 23:33:48 +0100
From: Stefani Seibold <stefani@...bold.net>
To: Andi Kleen <andi@...stfloor.org>
Cc: linux-kernel@...r.kernel.org, akpm@...l.org
Subject: Re: [PATCH] [3/6] kfifo: Sanitize *_user error handling
Andrew asked me for a review... Don't do multiplexing again: lenout should
return the number of copied bytes, not an error code.
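
With that convention a caller never has to guess: it checks the return value
for -EFAULT and takes the byte count from the output parameter, nothing else.
A hypothetical write() handler, just to show the calling convention of the new
kfifo_from_user() (example_fifo and example_write are made-up names):

static struct kfifo example_fifo;	/* set up elsewhere in the driver */

static ssize_t example_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	unsigned int copied;
	int ret;

	ret = kfifo_from_user(&example_fifo, buf, count, &copied);
	if (ret)
		return ret;		/* user copy faulted */

	return copied;			/* bytes actually stored in the fifo */
}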
On Sunday, 27.12.2009, at 22:03 +0100, Andi Kleen wrote:
> Right now for kfifo_*_user it's not easily possible to distinguish between a
> user copy failing and the FIFO not containing enough data. The problem
> is that both conditions are multiplexed into the same return code.
>
> Avoid this by moving the "copy length" into a separate output parameter
> and only return 0/-EFAULT in the main return value.
>
> I didn't fully adapt the weird "record" variants, those seem
> to be unused anyways and were rather messy (should they be just removed?)
>
> I would appreciate some double checking if I did all the conversions
> correctly.
>
> Signed-off-by: Andi Kleen <ak@...ux.intel.com>
>
> ---
> include/linux/kfifo.h | 8 ++---
> kernel/kfifo.c | 76 ++++++++++++++++++++++++++++++++------------------
> 2 files changed, 53 insertions(+), 31 deletions(-)
>
> Index: linux/include/linux/kfifo.h
> ===================================================================
> --- linux.orig/include/linux/kfifo.h
> +++ linux/include/linux/kfifo.h
> @@ -243,11 +243,11 @@ static inline __must_check unsigned int
>
> extern void kfifo_skip(struct kfifo *fifo, unsigned int len);
>
> -extern __must_check unsigned int kfifo_from_user(struct kfifo *fifo,
> - const void __user *from, unsigned int n);
> +extern __must_check int kfifo_from_user(struct kfifo *fifo,
> + const void __user *from, unsigned int n, unsigned *lenout);
>
> -extern __must_check unsigned int kfifo_to_user(struct kfifo *fifo,
> - void __user *to, unsigned int n);
> +extern __must_check int kfifo_to_user(struct kfifo *fifo,
> + void __user *to, unsigned int n, unsigned *lenout);
>
> /**
> * __kfifo_add_out internal helper function for updating the out offset
> Index: linux/kernel/kfifo.c
> ===================================================================
> --- linux.orig/kernel/kfifo.c
> +++ linux/kernel/kfifo.c
> @@ -159,8 +159,9 @@ static inline void __kfifo_out_data(stru
> memcpy(to + l, fifo->buffer, len - l);
> }
>
> -static inline unsigned int __kfifo_from_user_data(struct kfifo *fifo,
> - const void __user *from, unsigned int len, unsigned int off)
> +static inline int __kfifo_from_user_data(struct kfifo *fifo,
> + const void __user *from, unsigned int len, unsigned int off,
> + unsigned *lenout)
> {
> unsigned int l;
> int ret;
> @@ -177,16 +178,20 @@ static inline unsigned int __kfifo_from_
> /* first put the data starting from fifo->in to buffer end */
> l = min(len, fifo->size - off);
> ret = copy_from_user(fifo->buffer + off, from, l);
> -
> - if (unlikely(ret))
> - return ret + len - l;
> + if (unlikely(ret)) {
> + *lenout = ret;
Should lenout not contain the number of copied bytes? So I would prefer
	*lenout = l - ret;
> + return -EFAULT;
> + }
> + *lenout = l;
Unnecessary, see below.
>
> /* then put the rest (if any) at the beginning of the buffer */
> - return copy_from_user(fifo->buffer, from + l, len - l);
> + ret = copy_from_user(fifo->buffer, from + l, len - l);
> + *lenout += ret ? ret : len - l;
I would rather write:

	if (unlikely(ret)) {
		*lenout = len - ret;
		return -EFAULT;
	}
>
> + return ret ? -EFAULT : 0;
Should be: return 0;
> }
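
Taking the comments above together, the copy part of __kfifo_from_user_data()
would then read roughly like this (only a sketch of the suggested error
handling, keeping the names from the patch; the unchanged prologue is left
out, and note that the success path then needs a final *lenout = len):

static inline int __kfifo_from_user_data(struct kfifo *fifo,
	const void __user *from, unsigned int len, unsigned int off,
	unsigned *lenout)
{
	unsigned int l;
	int ret;

	/* ... unchanged prologue ... */

	/* first put the data starting from fifo->in to buffer end */
	l = min(len, fifo->size - off);
	ret = copy_from_user(fifo->buffer + off, from, l);
	if (unlikely(ret)) {
		/* copy_from_user() returns the number of bytes NOT copied */
		*lenout = l - ret;
		return -EFAULT;
	}

	/* then put the rest (if any) at the beginning of the buffer */
	ret = copy_from_user(fifo->buffer, from + l, len - l);
	if (unlikely(ret)) {
		*lenout = len - ret;
		return -EFAULT;
	}

	*lenout = len;
	return 0;
}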
>
> -static inline unsigned int __kfifo_to_user_data(struct kfifo *fifo,
> - void __user *to, unsigned int len, unsigned int off)
> +static inline int __kfifo_to_user_data(struct kfifo *fifo,
> + void __user *to, unsigned int len, unsigned int off, unsigned *lenout)
> {
> unsigned int l;
> int ret;
> @@ -203,12 +208,21 @@ static inline unsigned int __kfifo_to_us
> /* first get the data from fifo->out until the end of the buffer */
> l = min(len, fifo->size - off);
> ret = copy_to_user(to, fifo->buffer + off, l);
> -
> - if (unlikely(ret))
> - return ret + len - l;
> + *lenout = l;
> + if (unlikely(ret)) {
> + *lenout -= ret;
> + return -EFAULT;
> + }
>
> /* then get the rest (if any) from the beginning of the buffer */
> - return copy_to_user(to + l, fifo->buffer, len - l);
> + len -= l;
> + ret = copy_to_user(to + l, fifo->buffer, len);
> + if (unlikely(ret)) {
> + *lenout += len - ret;
> + return -EFAULT;
> + }
> + *lenout += len;
> + return 0;
> }
>
> unsigned int __kfifo_in_n(struct kfifo *fifo,
> @@ -298,10 +312,13 @@ EXPORT_SYMBOL(__kfifo_out_generic);
> unsigned int __kfifo_from_user_n(struct kfifo *fifo,
> const void __user *from, unsigned int len, unsigned int recsize)
> {
> + unsigned total;
> +
> if (kfifo_avail(fifo) < len + recsize)
> return len + 1;
>
> - return __kfifo_from_user_data(fifo, from, len, recsize);
> + __kfifo_from_user_data(fifo, from, len, recsize, &total);
> + return total;
> }
> EXPORT_SYMBOL(__kfifo_from_user_n);
>
> @@ -312,18 +329,21 @@ EXPORT_SYMBOL(__kfifo_from_user_n);
> * @len: the length of the data to be added.
> *
> * This function copies at most @len bytes from the @from into the
> - * FIFO depending and returns the number of copied bytes.
> + * FIFO depending and returns -EFAULT/0.
> *
> * Note that with only one concurrent reader and one concurrent
> * writer, you don't need extra locking to use these functions.
> */
> -unsigned int kfifo_from_user(struct kfifo *fifo,
> - const void __user *from, unsigned int len)
> +int kfifo_from_user(struct kfifo *fifo,
> + const void __user *from, unsigned int len, unsigned *total)
> {
> + int ret;
> len = min(kfifo_avail(fifo), len);
> - len -= __kfifo_from_user_data(fifo, from, len, 0);
> + ret = __kfifo_from_user_data(fifo, from, len, 0, total);
> + if (ret)
> + return ret;
> __kfifo_add_in(fifo, len);
> - return len;
> + return 0;
> }
> EXPORT_SYMBOL(kfifo_from_user);
>
> @@ -338,17 +358,17 @@ unsigned int __kfifo_to_user_n(struct kf
> void __user *to, unsigned int len, unsigned int reclen,
> unsigned int recsize)
> {
> - unsigned int ret;
> + unsigned int ret, total;
>
> if (kfifo_len(fifo) < reclen + recsize)
> return len;
>
> - ret = __kfifo_to_user_data(fifo, to, reclen, recsize);
> + ret = __kfifo_to_user_data(fifo, to, reclen, recsize, &total);
>
> if (likely(ret == 0))
> __kfifo_add_out(fifo, reclen + recsize);
>
> - return ret;
> + return total;
> }
> EXPORT_SYMBOL(__kfifo_to_user_n);
>
> @@ -357,20 +377,22 @@ EXPORT_SYMBOL(__kfifo_to_user_n);
> * @fifo: the fifo to be used.
> * @to: where the data must be copied.
> * @len: the size of the destination buffer.
> + @ @lenout: pointer to output variable with copied data
> *
> * This function copies at most @len bytes from the FIFO into the
> - * @to buffer and returns the number of copied bytes.
> + * @to buffer and 0 or -EFAULT.
> *
> * Note that with only one concurrent reader and one concurrent
> * writer, you don't need extra locking to use these functions.
> */
> -unsigned int kfifo_to_user(struct kfifo *fifo,
> - void __user *to, unsigned int len)
> +int kfifo_to_user(struct kfifo *fifo,
> + void __user *to, unsigned int len, unsigned *lenout)
> {
> + int ret;
> len = min(kfifo_len(fifo), len);
> - len -= __kfifo_to_user_data(fifo, to, len, 0);
> - __kfifo_add_out(fifo, len);
> - return len;
> + ret = __kfifo_to_user_data(fifo, to, len, 0, lenout);
> + __kfifo_add_out(fifo, *lenout);
> + return ret;
> }
> EXPORT_SYMBOL(kfifo_to_user);
>