Date:   Fri, 08 Nov 2019 19:20:27 +0100
From:   Jesper Dangaard Brouer <brouer@...hat.com>
To:     unlisted-recipients:; (no To-header on input)
Cc:     Toke Høiland-Jørgensen <toke@...hat.com>,
        netdev@...r.kernel.org,
        Ilias Apalodimas <ilias.apalodimas@...aro.org>,
        Jesper Dangaard Brouer <brouer@...hat.com>,
        Saeed Mahameed <saeedm@...lanox.com>,
        Matteo Croce <mcroce@...hat.com>,
        Jonathan Lemon <jonathan.lemon@...il.com>,
        Lorenzo Bianconi <lorenzo@...nel.org>,
        Tariq Toukan <tariqt@...lanox.com>
Subject: [net-next v1 PATCH 2/2] page_pool: make inflight returns more
 robust via blocking alloc cache

When requesting page_pool shutdown, it is a requirement that the consumer
(RX-side) has already been disconnected, yet __page_pool_request_shutdown()
contains a loop that empties the RX alloc cache on every call. Producers can
still be in-flight, but they MUST NOT return pages into the RX alloc cache.
Thus, the RX alloc cache MUST remain empty after the first clearing; anything
else is considered a bug. Let's make it explicit that the cache is only
cleared once.

This patch empties the RX alloc cache only once and then blocks it.
The alloc cache is blocked by pretending it is full, and additionally by
poisoning the last element. This keeps producers off the fast-path, and a
consumer (which is not allowed at this point) will see a NULL pointer.
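
To illustrate the idea (not part of the patch), here is a stand-alone
userspace sketch of the blocking trick. PP_ALLOC_CACHE_SIZE and the
cache/count fields mirror the names in include/net/page_pool.h, but the
helpers producer_recycle(), consumer_alloc() and block_alloc_cache() are
invented for this model and heavily simplified compared to the real
fast-paths:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define PP_ALLOC_CACHE_SIZE 128

struct page;                          /* opaque placeholder for this sketch */

struct alloc_cache {
	unsigned int count;
	struct page *cache[PP_ALLOC_CACHE_SIZE];
};

/* Producer fast-path: recycle a page into the cache, unless it looks full. */
static bool producer_recycle(struct alloc_cache *ac, struct page *page)
{
	if (ac->count >= PP_ALLOC_CACHE_SIZE)
		return false;         /* blocked: caller must use the slow path */
	ac->cache[ac->count++] = page;
	return true;
}

/* Consumer fast-path: pop a page (not allowed once shutdown is requested). */
static struct page *consumer_alloc(struct alloc_cache *ac)
{
	if (!ac->count)
		return NULL;
	return ac->cache[--ac->count];  /* after blocking: the NULL poison */
}

/* The one-time blocking step from the patch, in miniature. */
static void block_alloc_cache(struct alloc_cache *ac)
{
	ac->cache[PP_ALLOC_CACHE_SIZE - 1] = NULL;  /* poison last element */
	ac->count = PP_ALLOC_CACHE_SIZE;            /* pretend the cache is full */
}

int main(void)
{
	struct alloc_cache ac = { .count = 0 };
	struct page *fake_page = (struct page *)0x1;

	block_alloc_cache(&ac);

	/* Producer is pushed off the fast-path... */
	printf("recycle into cache: %s\n",
	       producer_recycle(&ac, fake_page) ? "fast-path" : "blocked");

	/* ...and a misbehaving consumer gets a NULL page it will crash on. */
	printf("consumer got: %p\n", (void *)consumer_alloc(&ac));
	return 0;
}

Because the blocked cache reports itself as full, the producer's fast-path
check fails and recycling has to take the slow path (ptr_ring), while a
consumer that wrongly keeps allocating pops the poisoned NULL and crashes
loudly instead of silently reusing a page.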

Signed-off-by: Jesper Dangaard Brouer <brouer@...hat.com>
---
 include/net/page_pool.h |    2 ++
 net/core/page_pool.c    |   28 +++++++++++++++++++++-------
 2 files changed, 23 insertions(+), 7 deletions(-)

diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index 2cbcdbdec254..ab39b7955f9b 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -107,6 +107,8 @@ struct page_pool {
 	 * refcnt serves purpose is to simplify drivers error handling.
 	 */
 	refcount_t user_cnt;
+
+	bool shutdown_in_progress;
 };
 
 struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 226f2eb30418..89feee635d08 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -357,7 +357,7 @@ void __page_pool_free(struct page_pool *pool)
 	if (!page_pool_put(pool))
 		return;
 
-	WARN(pool->alloc.count, "API usage violation");
+	WARN(!pool->shutdown_in_progress, "API usage violation");
 	WARN(!ptr_ring_empty(&pool->ring), "ptr_ring is not empty");
 
 	if (!__page_pool_safe_to_destroy(pool))
@@ -367,8 +367,6 @@ void __page_pool_free(struct page_pool *pool)
 
 	/* Make sure kernel will crash on use-after-free */
 	pool->ring.queue = NULL;
-	pool->alloc.cache[PP_ALLOC_CACHE_SIZE - 1] = NULL;
-	pool->alloc.count = PP_ALLOC_CACHE_SIZE;
 
 	if (pool->p.flags & PP_FLAG_DMA_MAP)
 		put_device(pool->p.dev);
@@ -377,22 +375,38 @@ void __page_pool_free(struct page_pool *pool)
 }
 EXPORT_SYMBOL(__page_pool_free);
 
-/* Request to shutdown: release pages cached by page_pool, and check
- * for in-flight pages
- */
-bool __page_pool_request_shutdown(struct page_pool *pool)
+/* Empty alloc cache once and block it */
+void page_pool_empty_alloc_cache_once(struct page_pool *pool)
 {
 	struct page *page;
 
+	if (pool->shutdown_in_progress)
+		return;
+
 	/* Empty alloc cache, assume caller made sure this is
 	 * no-longer in use, and page_pool_alloc_pages() cannot be
 	 * call concurrently.
 	 */
 	while (pool->alloc.count) {
 		page = pool->alloc.cache[--pool->alloc.count];
+		pool->alloc.cache[pool->alloc.count] = NULL;
 		__page_pool_return_page(pool, page);
 	}
 
+	/* Block alloc cache, pretend it's full and poison last element */
+	pool->alloc.cache[PP_ALLOC_CACHE_SIZE - 1] = NULL;
+	pool->alloc.count = PP_ALLOC_CACHE_SIZE;
+
+	pool->shutdown_in_progress = true;
+}
+
+/* Request to shutdown: release pages cached by page_pool, and check
+ * for in-flight pages. RX-alloc side MUST be stopped prior to this.
+ */
+bool __page_pool_request_shutdown(struct page_pool *pool)
+{
+	page_pool_empty_alloc_cache_once(pool);
+
 	/* No more consumers should exist, but producers could still
 	 * be in-flight.
 	 */

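As a closing illustration (again not part of the patch), here is a minimal
model of the once-only semantics: the shutdown_in_progress flag makes a
repeated shutdown request a harmless no-op while the blocked cache state
persists. The field names mirror the patch, but this is a simplified
userspace sketch, not the kernel implementation:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define PP_ALLOC_CACHE_SIZE 128

struct pool_model {
	unsigned int count;
	void *cache[PP_ALLOC_CACHE_SIZE];
	bool shutdown_in_progress;
	unsigned int pages_released;      /* stand-in for __page_pool_return_page() calls */
};

/* Mirrors page_pool_empty_alloc_cache_once(): empty once, then block. */
static void empty_alloc_cache_once(struct pool_model *pool)
{
	if (pool->shutdown_in_progress)
		return;                   /* already emptied and blocked */

	while (pool->count) {
		pool->cache[--pool->count] = NULL;
		pool->pages_released++;   /* the real code returns the page here */
	}

	/* Block the cache: pretend it is full and poison the last element. */
	pool->cache[PP_ALLOC_CACHE_SIZE - 1] = NULL;
	pool->count = PP_ALLOC_CACHE_SIZE;
	pool->shutdown_in_progress = true;
}

int main(void)
{
	struct pool_model pool = { .count = 2,
				   .cache = { (void *)0x1, (void *)0x2 } };

	empty_alloc_cache_once(&pool);
	empty_alloc_cache_once(&pool);    /* second call is a no-op */

	printf("released=%u count=%u blocked=%d\n",
	       pool.pages_released, pool.count, pool.shutdown_in_progress);
	/* Expected output: released=2 count=128 blocked=1 */
	return 0;
}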