Message-Id: <20221217015435.73889-4-bhe@redhat.com>
Date: Sat, 17 Dec 2022 09:54:31 +0800
From: Baoquan He <bhe@...hat.com>
To: linux-kernel@...r.kernel.org
Cc: linux-mm@...ck.org, urezki@...il.com, stephen.s.brennan@...cle.com,
willy@...radead.org, akpm@...ux-foundation.org, hch@...radead.org,
Baoquan He <bhe@...hat.com>
Subject: [PATCH v2 3/7] mm/vmalloc.c: allow vread() to read out vm_map_ram areas
Currently, vread() can read out vmalloc areas which are associated
with a vm_struct, but it does not work for areas created through the
vm_map_ram() interface because they have no associated vm_struct.
As a result, vread() skips those areas.
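
To illustrate the problem (a sketch only, with a made-up helper name,
not part of this patch): such a mapping has no vm_struct behind it, so
find_vm_area() returns NULL for it:

#include <linux/vmalloc.h>

/* Hypothetical example: map pages the vm_map_ram() way. */
static void *map_pages_sketch(struct page **pages, unsigned int nr)
{
	void *va = vm_map_ram(pages, nr, NUMA_NO_NODE);

	/* no vm_struct is created for this range */
	if (va)
		WARN_ON(find_vm_area(va));
	return va;
}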
Here, add a new function vb_vread() to read out areas managed by
vmap_block specifically. Then recognize vm_map_ram areas via the
'flags' field of struct vmap_area and handle them accordingly.
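
For reference while reading the hunks below, the flags tested here were
introduced by patch 2/7 of this series:

#define VMAP_RAM		0x1 /* indicates vm_map_ram area*/
#define VMAP_BLOCK		0x2 /* mark out the vmap_block sub-type*/
#define VMAP_FLAGS_MASK		0x3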
Signed-off-by: Baoquan He <bhe@...hat.com>
---
 mm/vmalloc.c | 73 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 66 insertions(+), 7 deletions(-)
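
A side note for reviewers (below the '---' cut, so it will not be
applied): a standalone userspace sketch of the region-size arithmetic
used in vb_vread(), with arbitrary example values, showing why the
shift must be parenthesized:

#include <stdio.h>

#define PAGE_SHIFT	12

int main(void)
{
	unsigned int rs = 2, re = 3;	/* used page range from the bitmap loop */
	unsigned long offset = 0x123;	/* read starts mid-page */

	/*
	 * '<<' binds weaker than '-', so without the outer parentheses
	 * this would shift by (PAGE_SHIFT - offset) instead.
	 */
	unsigned long n = ((re - rs + 1) << PAGE_SHIFT) - offset;

	printf("%lu\n", n);	/* (2 << 12) - 0x123 = 7901 */
	return 0;
}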
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 190f29bbaaa7..6612914459cf 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -3515,6 +3515,58 @@ static int aligned_vread(char *buf, char *addr, unsigned long count)
 	return copied;
 }
 
+static void vb_vread(char *buf, char *addr, int count)
+{
+	char *start;
+	struct vmap_block *vb;
+	unsigned long offset;
+	unsigned int rs, re, n;
+
+	vb = xa_load(&vmap_blocks, addr_to_vb_idx((unsigned long)addr));
+	if (!vb) {
+		/* the vmap_block is gone, read the range out as a hole */
+		memset(buf, 0, count);
+		return;
+	}
+
+	spin_lock(&vb->lock);
+	if (bitmap_empty(vb->used_map, VMAP_BBMAP_BITS)) {
+		spin_unlock(&vb->lock);
+		memset(buf, 0, count);
+		return;
+	}
+	for_each_set_bitrange(rs, re, vb->used_map, VMAP_BBMAP_BITS) {
+		if (!count)
+			break;
+		start = vmap_block_vaddr(vb->va->va_start, rs);
+		/* zero-fill the hole in front of this used region */
+		while (addr < start) {
+			if (count == 0)
+				goto unlock;
+			*buf = '\0';
+			buf++;
+			addr++;
+			count--;
+		}
+		/* it could start reading from the middle of used region */
+		offset = offset_in_page(addr);
+		n = ((re - rs + 1) << PAGE_SHIFT) - offset;
+		if (n > count)
+			n = count;
+		aligned_vread(buf, start + offset, n);
+
+		buf += n;
+		addr += n;
+		count -= n;
+	}
+unlock:
+	spin_unlock(&vb->lock);
+
+	/* zero-fill the left dirty or free regions */
+	if (count)
+		memset(buf, 0, count);
+}
+
 /**
  * vread() - read vmalloc area in a safe way.
  * @buf:	buffer for reading data
@@ -3545,7 +3597,7 @@ long vread(char *buf, char *addr, unsigned long count)
 	struct vm_struct *vm;
 	char *vaddr, *buf_start = buf;
 	unsigned long buflen = count;
-	unsigned long n;
+	unsigned long n, size, flags;
 
 	addr = kasan_reset_tag(addr);
 
@@ -3566,12 +3618,16 @@ long vread(char *buf, char *addr, unsigned long count)
 		if (!count)
 			break;
 
-		if (!va->vm)
+		vm = va->vm;
+		flags = va->flags & VMAP_FLAGS_MASK;
+
+		if (!vm && !flags)
 			continue;
 
-		vm = va->vm;
-		vaddr = (char *) vm->addr;
-		if (addr >= vaddr + get_vm_area_size(vm))
+		vaddr = (char *) va->va_start;
+		size = flags ? va_size(va) : get_vm_area_size(vm);
+
+		if (addr >= vaddr + size)
 			continue;
 		while (addr < vaddr) {
 			if (count == 0)
@@ -3581,10 +3637,13 @@ long vread(char *buf, char *addr, unsigned long count)
 			addr++;
 			count--;
 		}
-		n = vaddr + get_vm_area_size(vm) - addr;
+		n = vaddr + size - addr;
 		if (n > count)
 			n = count;
-		if (!(vm->flags & VM_IOREMAP))
+
+		if ((flags & (VMAP_RAM|VMAP_BLOCK)) == (VMAP_RAM|VMAP_BLOCK))
+			vb_vread(buf, addr, n);
+		else if ((flags & VMAP_RAM) || !(vm->flags & VM_IOREMAP))
 			aligned_vread(buf, addr, n);
 		else /* IOREMAP area is treated as memory hole */
 			memset(buf, 0, n);
--
2.34.1