lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20170927015323.GA19100@Big-Sky.local>
Date:   Tue, 26 Sep 2017 20:54:00 -0500
From:   Dennis Zhou <dennisszhou@...il.com>
To:     Luis Henriques <lhenriques@...e.com>
Cc:     Tejun Heo <tj@...nel.org>, Christoph Lameter <cl@...ux.com>,
        linux-mm@...ck.org, linux-kernel@...r.kernel.org
Subject: Re: percpu allocation failures

Hi Luis,

This seems to be an issue with the reserved chunk being unable to
allocate memory when loading kernel modules. Unfortunately, I have not 
been successful in reproducing this with the reserved chunk allocation
path exposed or by inserting the nft_meta module.

Could you please send me the output when run with the following patch
and the output of the percpu memory statistics file before and after
inserting the module (PERCPU_STATS)? The stats are in
/sys/kernel/debug/percpu_stats.

Thanks,
Dennis

---
 mm/percpu.c | 32 ++++++++++++++++++++++++++++++--
 1 file changed, 30 insertions(+), 2 deletions(-)

diff --git a/mm/percpu.c b/mm/percpu.c
index 59d44d6..031fd91 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1335,6 +1335,7 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 {
 	static int warn_limit = 10;
 	struct pcpu_chunk *chunk;
+	struct pcpu_block_md *block;
 	const char *err;
 	bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
 	int slot, off, cpu, ret;
@@ -1371,17 +1372,43 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 	if (reserved && pcpu_reserved_chunk) {
 		chunk = pcpu_reserved_chunk;
 
+		printk(KERN_DEBUG "percpu: reserved chunk: %d, %d, %d, %d, %d, %d, %d",
+		       chunk->free_bytes, chunk->contig_bits,
+		       chunk->contig_bits_start, chunk->first_bit,
+		       chunk->start_offset, chunk->end_offset,
+		       chunk->nr_pages);
+
+		printk(KERN_DEBUG "percpu: rchunk md blocks");
+		for (block = chunk->md_blocks;
+		     block < chunk->md_blocks + pcpu_chunk_nr_blocks(chunk);
+		     block++) {
+			printk(KERN_DEBUG "   percpu: %d, %d, %d, %d, %d",
+			       block->contig_hint,
+			       block->contig_hint_start,
+			       block->left_free,
+			       block->right_free,
+			       block->first_free);
+		}
+
 		off = pcpu_find_block_fit(chunk, bits, bit_align, is_atomic);
+
+		printk(KERN_DEBUG "percpu: pcpu_find_block_fit: %d, %zu, %zu",
+		       off, bits, bit_align);
+
 		if (off < 0) {
-			err = "alloc from reserved chunk failed";
+			err = "alloc from reserved chunk failed to find fit";
 			goto fail_unlock;
 		}
 
 		off = pcpu_alloc_area(chunk, bits, bit_align, off);
+
+		printk(KERN_DEBUG "percpu: pcpu_alloc_area: %d, %zu, %zu",
+		       off, bits, bit_align);
+
 		if (off >= 0)
 			goto area_found;
 
-		err = "alloc from reserved chunk failed";
+		err = "alloc from reserved chunk failed to alloc area";
 		goto fail_unlock;
 	}
 
@@ -1547,6 +1574,7 @@ void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
 {
 	return pcpu_alloc(size, align, true, GFP_KERNEL);
 }
+EXPORT_SYMBOL_GPL(__alloc_reserved_percpu);
 
 /**
  * pcpu_balance_workfn - manage the amount of free chunks and populated pages
-- 
1.8.3.1

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ