Message-Id: <20200417150151.17239-2-david@redhat.com>
Date:   Fri, 17 Apr 2020 17:01:50 +0200
From:   David Hildenbrand <david@...hat.com>
To:     linux-kernel@...r.kernel.org
Cc:     linux-mm@...ck.org, linux-s390@...r.kernel.org,
        David Hildenbrand <david@...hat.com>,
        Heiko Carstens <heiko.carstens@...ibm.com>,
        Vasily Gorbik <gor@...ux.ibm.com>,
        Christian Borntraeger <borntraeger@...ibm.com>,
        Philipp Rudo <prudo@...ux.ibm.com>,
        Kirill Smelkov <kirr@...edi.com>,
        Michael Holzheu <holzheu@...ux.vnet.ibm.com>
Subject: [PATCH RFC 1/2] s390/zcore: traverse resources instead of memblocks

The zcore memmap basically contains the first level of all System RAM from
/proc/iomem. We want to disable CONFIG_ARCH_KEEP_MEMBLOCK (e.g., to not
create memblocks for hotplugged/standby memory and to save space), so
switch to traversing System RAM resources instead. During early boot, we
create resources for all early memblocks (including the crash kernel area).
When adding standby memory, we currently create both memblocks and
resources.
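
As an illustration (not part of the patch) of what "first level of System
RAM from /proc/iomem" means, here is a minimal userspace sketch that prints
the top-level "System RAM" ranges as "start size" pairs, which is
essentially the information the zcore memmap mirrors. Run it as root,
otherwise the kernel reports the addresses as zeroes:

/* Illustrative only: list first-level "System RAM" ranges from /proc/iomem. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/iomem", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		unsigned long long start, end;

		/* first-level entries are not indented */
		if (line[0] == ' ')
			continue;
		if (sscanf(line, "%llx-%llx", &start, &end) == 2 &&
		    strstr(line, ": System RAM"))
			printf("%016llx %016llx\n", start, end - start + 1);
	}
	fclose(f);
	return 0;
}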

Note: As we don't have memory hotplug after boot (standby memory is added
via sclp during boot), we don't have to worry about races.

I was only able to test under KVM (where I hacked up zcore to still
create the memmap file):

root@vm0:~# cat /proc/iomem
00000000-2fffffff : System RAM
  10424000-10ec6fff : Kernel code
  10ec7000-1139a0e3 : Kernel data
  1177a000-11850fff : Kernel bss
30000000-3fffffff : Crash kernel

Result without this patch:
root@vm0:~# cat /sys/kernel/debug/zcore/memmap
0000000000000000 0000000040000000

Result with this patch:
root@vm0:~# cat /sys/kernel/debug/zcore/memmap
0000000000000000 0000000030000000 0000000030000000 0000000010000000

The difference is due to memblocks getting merged, while resources
(currently) are not: the System RAM and crash kernel regions remain
separate entries. So we might end up with some more entries, but they
describe the same memory map.
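
A minimal sketch (not from the patch; the memmap contents are hard-coded
from the example above) showing how the whitespace-separated start/size
pairs can be parsed back, and that both outputs add up to the same
0x40000000 of memory:

/* Illustrative only: parse zcore memmap pairs and sum the sizes. */
#include <stdio.h>

int main(void)
{
	/* /sys/kernel/debug/zcore/memmap output with this patch applied */
	const char *memmap = "0000000000000000 0000000030000000 "
			     "0000000030000000 0000000010000000 ";
	unsigned long long start, size, total = 0;
	int n;

	while (sscanf(memmap, "%llx %llx%n", &start, &size, &n) == 2) {
		printf("range: start=%016llx size=%016llx\n", start, size);
		total += size;
		memmap += n;
	}
	printf("total: %016llx\n", total);	/* 0000000040000000 */
	return 0;
}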

Cc: Heiko Carstens <heiko.carstens@...ibm.com>
Cc: Vasily Gorbik <gor@...ux.ibm.com>
Cc: Christian Borntraeger <borntraeger@...ibm.com>
Cc: Philipp Rudo <prudo@...ux.ibm.com>
Cc: Kirill Smelkov <kirr@...edi.com>
Cc: Michael Holzheu <holzheu@...ux.vnet.ibm.com>
Signed-off-by: David Hildenbrand <david@...hat.com>
---
 drivers/s390/char/zcore.c | 61 ++++++++++++++++++++++++++++++---------
 1 file changed, 48 insertions(+), 13 deletions(-)

diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 08f812475f5e..c40ac7d548d8 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -16,7 +16,7 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/debugfs.h>
-#include <linux/memblock.h>
+#include <linux/ioport.h>
 
 #include <asm/asm-offsets.h>
 #include <asm/ipl.h>
@@ -139,35 +139,70 @@ static void release_hsa(void)
 	hsa_available = 0;
 }
 
+struct zcore_memmap_info {
+	char *buf;
+	size_t length;
+};
+
 static ssize_t zcore_memmap_read(struct file *filp, char __user *buf,
 				 size_t count, loff_t *ppos)
 {
-	return simple_read_from_buffer(buf, count, ppos, filp->private_data,
-				       memblock.memory.cnt * CHUNK_INFO_SIZE);
+	struct zcore_memmap_info *info = filp->private_data;
+
+	return simple_read_from_buffer(buf, count, ppos, info->buf,
+				       info->length);
+}
+
+static int zcore_count_ram_resource(struct resource *res, void *arg)
+{
+	size_t *count = arg;
+
+	*count += 1;
+	return 0;
+}
+
+static int zcore_process_ram_resource(struct resource *res, void *arg)
+{
+	char **buf = arg;
+
+	sprintf(*buf, "%016lx %016lx ", (unsigned long) res->start,
+		(unsigned long) resource_size(res));
+
+	*buf += CHUNK_INFO_SIZE;
+	return 0;
 }
 
 static int zcore_memmap_open(struct inode *inode, struct file *filp)
 {
-	struct memblock_region *reg;
+	struct zcore_memmap_info *info;
+	size_t count = 0;
 	char *buf;
-	int i = 0;
 
-	buf = kcalloc(memblock.memory.cnt, CHUNK_INFO_SIZE, GFP_KERNEL);
+	walk_system_ram_res(0, ULONG_MAX, &count, zcore_count_ram_resource);
+
+	info = kmalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+	buf = kcalloc(count, CHUNK_INFO_SIZE, GFP_KERNEL);
 	if (!buf) {
+		kfree(info);
 		return -ENOMEM;
 	}
-	for_each_memblock(memory, reg) {
-		sprintf(buf + (i++ * CHUNK_INFO_SIZE), "%016llx %016llx ",
-			(unsigned long long) reg->base,
-			(unsigned long long) reg->size);
-	}
-	filp->private_data = buf;
+	info->length = count * CHUNK_INFO_SIZE;
+	info->buf = buf;
+
+	walk_system_ram_res(0, ULONG_MAX, &buf, zcore_process_ram_resource);
+
+	filp->private_data = info;
 	return nonseekable_open(inode, filp);
 }
 
 static int zcore_memmap_release(struct inode *inode, struct file *filp)
 {
-	kfree(filp->private_data);
+	struct zcore_memmap_info *info = filp->private_data;
+
+	kfree(info->buf);
+	kfree(info);
 	return 0;
 }
 
-- 
2.25.1
