Message-ID: <Znq7EtiOnV8YLHDK@pc638.lan>
Date: Tue, 25 Jun 2024 14:41:54 +0200
From: Uladzislau Rezki <urezki@...il.com>
To: Baoquan He <bhe@...hat.com>
Cc: Uladzislau Rezki <urezki@...il.com>, Nick Bowler <nbowler@...conx.ca>,
Hailong Liu <hailong.liu@...o.com>, linux-kernel@...r.kernel.org,
Linux regressions mailing list <regressions@...ts.linux.dev>,
linux-mm@...ck.org, sparclinux@...r.kernel.org,
Andrew Morton <akpm@...ux-foundation.org>
Subject: Re: PROBLEM: kernel crashes when running xfsdump since ~6.4
On Tue, Jun 25, 2024 at 11:30:33AM +0800, Baoquan He wrote:
> On 06/24/24 at 02:16pm, Uladzislau Rezki wrote:
> > On Fri, Jun 21, 2024 at 10:02:50PM +0800, Baoquan He wrote:
> > > On 06/21/24 at 11:44am, Uladzislau Rezki wrote:
> > > > On Fri, Jun 21, 2024 at 03:07:16PM +0800, Baoquan He wrote:
> > > > > On 06/21/24 at 11:30am, Hailong Liu wrote:
> > > > > > On Thu, 20. Jun 14:02, Nick Bowler wrote:
> > > > > > > On 2024-06-20 02:19, Nick Bowler wrote:
> > > ......
> > > > > diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> > > > > index be2dd281ea76..18e87cafbaf2 100644
> > > > > --- a/mm/vmalloc.c
> > > > > +++ b/mm/vmalloc.c
> > > > > @@ -2542,7 +2542,7 @@ static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
> > > > > static struct xarray *
> > > > > addr_to_vb_xa(unsigned long addr)
> > > > > {
> > > > > - int index = (addr / VMAP_BLOCK_SIZE) % num_possible_cpus();
> > > > > + int index = (addr / VMAP_BLOCK_SIZE) % nr_cpu_ids;
> > > > >
> > > > > return &per_cpu(vmap_block_queue, index).vmap_blocks;
> > > > > }
> > > > >
> > > > The problem I see is that the following initialization:
> > > > <snip>
> > > > for_each_possible_cpu(i) {
> > > > struct vmap_block_queue *vbq;
> > > > struct vfree_deferred *p;
> > > >
> > > > vbq = &per_cpu(vmap_block_queue, i);
> > > > spin_lock_init(&vbq->lock);
> > > > INIT_LIST_HEAD(&vbq->free);
> > > > p = &per_cpu(vfree_deferred, i);
> > > > init_llist_head(&p->list);
> > > > INIT_WORK(&p->wq, delayed_vfree_work);
> > > > xa_init(&vbq->vmap_blocks);
> > > > }
> > > > <snip>
> > > >
> > > > is not done correctly or fully. My bad, I did not consider that CPUs in
> > > > the possible mask can be non-sequential :-/
> > > >
> > > > nr_cpu_ids is not the max possible CPU. For example, in Nick's case,
> > > > where he has two CPUs, num_possible_cpus() and nr_cpu_ids are the same.
> > >
> > > I checked the generic version of setup_nr_cpu_ids(); from the code, it
> > > differs from my understanding.
> > >
> > > kernel/smp.c
> > > void __init setup_nr_cpu_ids(void)
> > > {
> > > set_nr_cpu_ids(find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1);
> > > }
> > >
> > I see that it is not a weak function, so it is the generic one and its
> > behavior cannot be overridden, which is great. This does what we
> > need.
> >
> > Thank you for checking this, you are right!
>
> Thanks for confirming this.
>
> >
> > Then it is just a matter of proper initialization of the hash:
> >
> > <snip>
> > diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> > index 5d3aa2dc88a8..1733946f7a12 100644
> > --- a/mm/vmalloc.c
> > +++ b/mm/vmalloc.c
> > @@ -5087,7 +5087,13 @@ void __init vmalloc_init(void)
> > */
> > vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);
> >
> > - for_each_possible_cpu(i) {
> > + /*
> > + * We use "nr_cpu_ids" here because some architectures
> > + * may have "gaps" in the cpu possible mask. That is OK
> > + * for per-cpu accesses, but not when the value is also
> > + * used as a hash range.
> > + */
> > + for (i = 0; i < nr_cpu_ids; i++) {
>
> I was wrong in my earlier comments. Percpu variables are only available
> on possible CPUs. For CPUs that are not possible, no memory is allocated
> and mapped for the static percpu variable vmap_block_queue, so accessing
> it there will cause problems.
>
> In Nick's case, there are only CPU0 and CPU2. If you access
> &per_cpu(vmap_block_queue, 1), a problem occurs. So I think we may need
> to take another approach for vbq, e.g.:
> 1) store the vb in the nearest neighbouring vbq on a possible CPU, as in
> the draft patch below;
> 2) create a normal array of size nr_cpu_ids to store the vbqs, so we can
> store/fetch a vbq even for a non-possible CPU?
>
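Right, and just switching the modulo to nr_cpu_ids is not enough by
itself, because the hash can still land on an index that is a hole in
the possible mask. Here is a small stand-alone userspace sketch (not
kernel code) of the index math for Nick's topology where only CPU0 and
CPU2 are possible; the mask and the VMAP_BLOCK_SIZE value below are made
up for illustration only:

<snip>
#include <stdio.h>

#define VMAP_BLOCK_SIZE	(64UL * 4096UL)	/* illustrative value only */

int main(void)
{
	unsigned long possible_mask = 0x5;	/* CPU0 and CPU2, as on Nick's box */
	int num_possible = __builtin_popcountl(possible_mask);			/* 2 */
	int nr_cpu_ids = 8 * (int)sizeof(long) - __builtin_clzl(possible_mask);	/* last bit + 1 = 3 */
	unsigned long addr;

	for (addr = 0; addr < 4 * VMAP_BLOCK_SIZE; addr += VMAP_BLOCK_SIZE) {
		int old_idx = (addr / VMAP_BLOCK_SIZE) % num_possible;	/* 0, 1, 0, 1 */
		int new_idx = (addr / VMAP_BLOCK_SIZE) % nr_cpu_ids;	/* 0, 1, 2, 0 */

		printf("addr %#lx: old index %d, new index %d\n",
			addr, old_idx, new_idx);
	}

	/*
	 * Both hashes can return index 1, which is a hole in the possible
	 * mask, so per_cpu(vmap_block_queue, 1) has no backing memory.
	 * That is why the hash target has to be a plain array, or the
	 * index has to be remapped onto a possible CPU.
	 */
	return 0;
}
<snip>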
See below how the patch looks if we switch to a hash array:
<snip>
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 45e1506d58c3..a8bcd9ceec2d 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2501,7 +2501,8 @@ struct vmap_block {
};
/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
-static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
+static struct vmap_block_queue *vmap_block_queue;
+static bool vmap_block_queue_initialized;
/*
* In order to fast access to any "vmap_block" associated with a
@@ -2542,9 +2543,9 @@ static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
static struct xarray *
addr_to_vb_xa(unsigned long addr)
{
- int index = (addr / VMAP_BLOCK_SIZE) % num_possible_cpus();
+ int index = (addr / VMAP_BLOCK_SIZE) % nr_cpu_ids;
- return &per_cpu(vmap_block_queue, index).vmap_blocks;
+ return &vmap_block_queue[index].vmap_blocks;
}
/*
@@ -2626,7 +2627,7 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
return ERR_PTR(err);
}
- vbq = raw_cpu_ptr(&vmap_block_queue);
+ vbq = &vmap_block_queue[raw_smp_processor_id()];
spin_lock(&vbq->lock);
list_add_tail_rcu(&vb->free_list, &vbq->free);
spin_unlock(&vbq->lock);
@@ -2657,6 +2658,9 @@ static bool purge_fragmented_block(struct vmap_block *vb,
struct vmap_block_queue *vbq, struct list_head *purge_list,
bool force_purge)
{
+ if (!vmap_block_queue_initialized)
+ return false;
+
if (vb->free + vb->dirty != VMAP_BBMAP_BITS ||
vb->dirty == VMAP_BBMAP_BITS)
return false;
@@ -2692,7 +2696,12 @@ static void purge_fragmented_blocks(int cpu)
{
LIST_HEAD(purge);
struct vmap_block *vb;
- struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
+ struct vmap_block_queue *vbq;
+
+ if (!vmap_block_queue_initialized)
+ return;
+
+ vbq = &vmap_block_queue[cpu];
rcu_read_lock();
list_for_each_entry_rcu(vb, &vbq->free, free_list) {
@@ -2715,7 +2724,7 @@ static void purge_fragmented_blocks_allcpus(void)
{
int cpu;
- for_each_possible_cpu(cpu)
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++)
purge_fragmented_blocks(cpu);
}
@@ -2739,7 +2748,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
order = get_order(size);
rcu_read_lock();
- vbq = raw_cpu_ptr(&vmap_block_queue);
+ vbq = &vmap_block_queue[raw_smp_processor_id()];
list_for_each_entry_rcu(vb, &vbq->free, free_list) {
unsigned long pages_off;
@@ -2822,13 +2831,13 @@ static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
LIST_HEAD(purge_list);
int cpu;
- if (unlikely(!vmap_initialized))
+ if (unlikely(!vmap_initialized || !vmap_block_queue_initialized))
return;
mutex_lock(&vmap_purge_lock);
- for_each_possible_cpu(cpu) {
- struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
+ struct vmap_block_queue *vbq = &vmap_block_queue[cpu];
struct vmap_block *vb;
unsigned long idx;
@@ -2910,7 +2919,7 @@ void vm_unmap_ram(const void *mem, unsigned int count)
kasan_poison_vmalloc(mem, size);
- if (likely(count <= VMAP_MAX_ALLOC)) {
+ if (likely(count <= VMAP_MAX_ALLOC) && vmap_block_queue_initialized) {
debug_check_no_locks_freed(mem, size);
vb_free(addr, size);
return;
@@ -2946,7 +2955,7 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node)
unsigned long addr;
void *mem;
- if (likely(count <= VMAP_MAX_ALLOC)) {
+ if (likely(count <= VMAP_MAX_ALLOC && vmap_block_queue_initialized)) {
mem = vb_alloc(size, GFP_KERNEL);
if (IS_ERR(mem))
return NULL;
@@ -5087,17 +5096,28 @@ void __init vmalloc_init(void)
*/
vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);
+ vmap_block_queue = kmalloc_array(
+ nr_cpu_ids, sizeof(struct vmap_block_queue), GFP_NOWAIT);
+
+ if (vmap_block_queue) {
+ for (i = 0; i < nr_cpu_ids; i++) {
+ struct vmap_block_queue *vbq =
+ &vmap_block_queue[i];
+
+ spin_lock_init(&vbq->lock);
+ INIT_LIST_HEAD(&vbq->free);
+ xa_init(&vbq->vmap_blocks);
+ }
+ } else {
+ pr_err("Failed to allocate vmap_block_queue array, using fallback path!\n");
+ }
+
for_each_possible_cpu(i) {
- struct vmap_block_queue *vbq;
- struct vfree_deferred *p;
+ struct vfree_deferred *p =
+ &per_cpu(vfree_deferred, i);
- vbq = &per_cpu(vmap_block_queue, i);
- spin_lock_init(&vbq->lock);
- INIT_LIST_HEAD(&vbq->free);
- p = &per_cpu(vfree_deferred, i);
init_llist_head(&p->list);
INIT_WORK(&p->wq, delayed_vfree_work);
- xa_init(&vbq->vmap_blocks);
}
/*
@@ -5125,6 +5145,9 @@ void __init vmalloc_init(void)
vmap_init_free_space();
vmap_initialized = true;
+ if (vmap_block_queue)
+ vmap_block_queue_initialized = true;
+
vmap_node_shrinker = shrinker_alloc(0, "vmap-node");
if (!vmap_node_shrinker) {
pr_err("Failed to allocate vmap-node shrinker!\n");
<snip>
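For comparison, a minimal sketch of your option 1) is below. It keeps the
per-cpu vmap_block_queue as it is in mainline and only remaps a "hole"
index onto the next possible CPU. This is just an idea of how
addr_to_vb_xa() could look, not a tested patch:

<snip>
static struct xarray *
addr_to_vb_xa(unsigned long addr)
{
	int index = (addr / VMAP_BLOCK_SIZE) % nr_cpu_ids;

	/*
	 * nr_cpu_ids is one past the highest possible CPU, so the
	 * index nr_cpu_ids - 1 is always a possible CPU and
	 * cpumask_next() can not run past the end of the mask here.
	 */
	if (!cpu_possible(index))
		index = cpumask_next(index, cpu_possible_mask);

	return &per_cpu(vmap_block_queue, index).vmap_blocks;
}
<snip>

If I read new_vmap_block() and vb_free() correctly, both derive the
xarray from the same addr_to_vb_xa() helper, so as long as the mapping is
deterministic and always lands on an initialized possible CPU, the insert
and lookup sides stay consistent.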
Any thoughts?
--
Uladzislau Rezki