Date:	Fri, 22 Aug 2014 12:26:30 -0400
From:	Benjamin LaHaise <bcrl@...ck.org>
To:	Dan Aloni <dan@...nelim.com>
Cc:	Linus Torvalds <torvalds@...ux-foundation.org>,
	security@...nel.org, linux-aio@...ck.org,
	linux-kernel@...r.kernel.org, Mateusz Guzik <mguzik@...hat.com>,
	Petr Matousek <pmatouse@...hat.com>,
	Kent Overstreet <kmo@...erainc.com>,
	Jeff Moyer <jmoyer@...hat.com>, stable@...r.kernel.org
Subject: Re: Revert "aio: fix aio request leak when events are reaped by user space"

On Fri, Aug 22, 2014 at 07:15:02PM +0300, Dan Aloni wrote:
> Sorry, I was waiting for a new patch from your direction; I should
> have replied earlier. What bothered me about the patch you sent is that
> completed_events is added as a new field but nothing assigns to it, so I
> wonder how it can be effective.

Ah, that was missing a hunk then.  Try this version instead.

		-ben
-- 
"Thought is the essence of where you are now."


diff --git a/fs/aio.c b/fs/aio.c
index ae63587..fbdcc47 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -142,6 +142,7 @@ struct kioctx {
 	struct {
 		unsigned	tail;
 		spinlock_t	completion_lock;
+		unsigned	completed_events;
 	} ____cacheline_aligned_in_smp;
 
 	struct page		*internal_pages[AIO_RING_PAGES];
@@ -857,6 +858,31 @@ out:
 	return ret;
 }
 
+static void refill_reqs_available(struct kioctx *ctx)
+{
+	spin_lock_irq(&ctx->completion_lock);
+	if (ctx->completed_events) {
+		unsigned head, tail, avail, completed;
+		struct aio_ring *ring;
+
+		ring = kmap_atomic(ctx->ring_pages[0]);
+		head = ACCESS_ONCE(ring->head);
+		tail = ACCESS_ONCE(ring->tail);
+		kunmap_atomic(ring);
+
+		avail = (head <= tail ?  tail : ctx->nr_events) - head;
+		completed = ctx->completed_events;
+		if (avail < completed)
+			completed -= avail;
+		else
+			completed = 0;
+		put_reqs_available(ctx, completed);
+	}
+
+	spin_unlock_irq(&ctx->completion_lock);
+}
+
+
 /* aio_get_req
  *	Allocate a slot for an aio request.
  * Returns NULL if no requests are free.
@@ -865,8 +891,11 @@ static inline struct kiocb *aio_get_req(struct kioctx *ctx)
 {
 	struct kiocb *req;
 
-	if (!get_reqs_available(ctx))
-		return NULL;
+	if (!get_reqs_available(ctx)) {
+		refill_reqs_available(ctx);
+		if (!get_reqs_available(ctx))
+			return NULL;
+	}
 
 	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO);
 	if (unlikely(!req))
@@ -958,6 +987,7 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
 	 */
 	spin_lock_irqsave(&ctx->completion_lock, flags);
 
+	ctx->completed_events++;
 	tail = ctx->tail;
 	pos = tail + AIO_EVENTS_OFFSET;
 
@@ -1005,7 +1035,6 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
 
 	/* everything turned out well, dispose of the aiocb. */
 	kiocb_free(iocb);
-	put_reqs_available(ctx, 1);
 
 	/*
 	 * We have to order our ring_info tail store above and test
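
For illustration only: below is a tiny standalone userspace sketch of the
accounting that refill_reqs_available() does in the patch above.  The
reaped_events() helper and the sample numbers are invented for this example
and are not part of the patch; the point is just that only completions beyond
what is still unread in the ring have been reaped by user space, and only
those slots can be handed back via put_reqs_available().

#include <stdio.h>

/*
 * Mirror of the arithmetic in refill_reqs_available(): "avail" is how many
 * completed events are still sitting unread in the ring, so only the
 * completions beyond that have already been reaped by user space.
 */
static unsigned reaped_events(unsigned head, unsigned tail,
			      unsigned nr_events, unsigned completed_events)
{
	unsigned avail = (head <= tail ? tail : nr_events) - head;

	return completed_events > avail ? completed_events - avail : 0;
}

int main(void)
{
	/* 8 completions so far; ring holds 3 unread events (head=5, tail=8) */
	printf("%u\n", reaped_events(5, 8, 128, 8));	/* prints 5 */

	/*
	 * Wrapped ring (head=120, tail=4): the formula only counts the
	 * events between head and the end of the ring (8 here), so no
	 * slots are returned on this pass.
	 */
	printf("%u\n", reaped_events(120, 4, 128, 8));	/* prints 0 */
	return 0;
}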