Date:	Mon, 28 Jan 2013 23:29:17 +1100
From:	Stephen Rothwell <sfr@...b.auug.org.au>
To:	Andrew Morton <akpm@...ux-foundation.org>
Cc:	linux-next@...r.kernel.org, linux-kernel@...r.kernel.org,
	Shaohua Li <shli@...nel.org>,
	Wang YanQing <udknight@...il.com>,
	Thomas Gleixner <tglx@...utronix.de>,
	Ingo Molnar <mingo@...e.hu>, "H. Peter Anvin" <hpa@...or.com>,
	Peter Zijlstra <peterz@...radead.org>
Subject: linux-next: manual merge of the akpm tree with the tip tree

Hi Andrew,

Today's linux-next merge of the akpm tree got a conflict in kernel/smp.c between commit c7b798525b50 ("smp: Fix SMP function call empty cpu mask race") from the tip tree and commit "smp: make smp_call_function_many() use logic similar to smp_call_function_single()" from the akpm tree.

I fixed it up (maybe - see below) and can carry the fix as necessary (no
action is required).

-- 
Cheers,
Stephen Rothwell                    sfr@...b.auug.org.au

diff --cc kernel/smp.c
index 93e576e,51a81b0..0000000
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@@ -30,10 -21,8 +21,9 @@@ enum 
  };
  
  struct call_function_data {
- 	struct call_single_data	csd;
- 	atomic_t		refs;
+ 	struct call_single_data	__percpu *csd;
  	cpumask_var_t		cpumask;
 +	cpumask_var_t		cpumask_ipi;
  };
  
  static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
@@@ -482,85 -397,39 +398,45 @@@ void smp_call_function_many(const struc
  	}
  
  	data = &__get_cpu_var(cfd_data);
- 	csd_lock(&data->csd);
- 
- 	/* This BUG_ON verifies our reuse assertions and can be removed */
- 	BUG_ON(atomic_read(&data->refs) || !cpumask_empty(data->cpumask));
- 
- 	/*
- 	 * The global call function queue list add and delete are protected
- 	 * by a lock, but the list is traversed without any lock, relying
- 	 * on the rcu list add and delete to allow safe concurrent traversal.
- 	 * We reuse the call function data without waiting for any grace
- 	 * period after some other cpu removes it from the global queue.
- 	 * This means a cpu might find our data block as it is being
- 	 * filled out.
- 	 *
- 	 * We hold off the interrupt handler on the other cpu by
- 	 * ordering our writes to the cpu mask vs our setting of the
- 	 * refs counter.  We assert only the cpu owning the data block
- 	 * will set a bit in cpumask, and each bit will only be cleared
- 	 * by the subject cpu.  Each cpu must first find its bit is
- 	 * set and then check that refs is set indicating the element is
- 	 * ready to be processed, otherwise it must skip the entry.
- 	 *
- 	 * On the previous iteration refs was set to 0 by another cpu.
- 	 * To avoid the use of transitivity, set the counter to 0 here
- 	 * so the wmb will pair with the rmb in the interrupt handler.
- 	 */
- 	atomic_set(&data->refs, 0);	/* convert 3rd to 1st party write */
  
- 	data->csd.func = func;
- 	data->csd.info = info;
- 
- 	/* Ensure 0 refs is visible before mask.  Also orders func and info */
- 	smp_wmb();
- 
- 	/* We rely on the "and" being processed before the store */
  	cpumask_and(data->cpumask, mask, cpu_online_mask);
  	cpumask_clear_cpu(this_cpu, data->cpumask);
- 	refs = cpumask_weight(data->cpumask);
  
  	/* Some callers race with other cpus changing the passed mask */
- 	if (unlikely(!refs)) {
- 		csd_unlock(&data->csd);
+ 	if (unlikely(!cpumask_weight(data->cpumask)))
  		return;
- 	}
  
 +	/*
 +	 * After we put an entry into the list, data->cpumask
 +	 * may be cleared again when another CPU sends another IPI for
 +	 * a SMP function call, so data->cpumask will be zero.
 +	 */
 +	cpumask_copy(data->cpumask_ipi, data->cpumask);
- 	raw_spin_lock_irqsave(&call_function.lock, flags);
- 	/*
- 	 * Place entry at the _HEAD_ of the list, so that any cpu still
- 	 * observing the entry in generic_smp_call_function_interrupt()
- 	 * will not miss any other list entries:
- 	 */
- 	list_add_rcu(&data->csd.list, &call_function.queue);
- 	/*
- 	 * We rely on the wmb() in list_add_rcu to complete our writes
- 	 * to the cpumask before this write to refs, which indicates
- 	 * data is on the list and is ready to be processed.
- 	 */
- 	atomic_set(&data->refs, refs);
- 	raw_spin_unlock_irqrestore(&call_function.lock, flags);
- 
- 	/*
- 	 * Make the list addition visible before sending the ipi.
- 	 * (IPIs must obey or appear to obey normal Linux cache
- 	 * coherency rules -- see comment in generic_exec_single).
- 	 */
- 	smp_mb();
+ 	for_each_cpu(cpu, data->cpumask) {
+ 		struct call_single_data *csd = per_cpu_ptr(data->csd, cpu);
+ 		struct call_single_queue *dst =
+ 					&per_cpu(call_single_queue, cpu);
+ 		unsigned long flags;
+ 
+ 		csd_lock(csd);
+ 		csd->func = func;
+ 		csd->info = info;
+ 
+ 		raw_spin_lock_irqsave(&dst->lock, flags);
+ 		list_add_tail(&csd->list, &dst->list);
+ 		raw_spin_unlock_irqrestore(&dst->lock, flags);
+ 	}
  
  	/* Send a message to all CPUs in the map */
 -	arch_send_call_function_ipi_mask(data->cpumask);
 +	arch_send_call_function_ipi_mask(data->cpumask_ipi);
  
- 	/* Optionally wait for the CPUs to complete */
- 	if (wait)
- 		csd_lock_wait(&data->csd);
+ 	if (wait) {
+ 		for_each_cpu(cpu, data->cpumask) {
+ 			struct call_single_data *csd =
+ 					per_cpu_ptr(data->csd, cpu);
+ 			csd_lock_wait(csd);
+ 		}
+ 	}
  }
  EXPORT_SYMBOL(smp_call_function_many);
  

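For context, a rough sketch (not the applied file contents) of how the resolved smp_call_function_many() reads once the two changes are combined: the per-cpu csd queuing from the akpm-tree patch plus the cpumask_ipi snapshot from the tip-tree race fix. The helpers and types (cfd_data, csd_lock(), call_single_queue, arch_send_call_function_ipi_mask()) are the existing kernel/smp.c ones; the early-return and single-CPU fallback paths at the top of the real function are omitted, and the function name is only illustrative.

	/* Sketch of the merged result; preemption is assumed disabled by the caller. */
	static void smp_call_function_many_sketch(const struct cpumask *mask,
						  smp_call_func_t func, void *info,
						  bool wait)
	{
		struct call_function_data *data = &__get_cpu_var(cfd_data);
		int this_cpu = smp_processor_id();
		int cpu;

		cpumask_and(data->cpumask, mask, cpu_online_mask);
		cpumask_clear_cpu(this_cpu, data->cpumask);

		/* Some callers race with other cpus changing the passed mask */
		if (unlikely(!cpumask_weight(data->cpumask)))
			return;

		/*
		 * Snapshot the mask before queuing anything: a target cpu that
		 * runs its csd early may clear its bit in data->cpumask before
		 * the IPI below is sent, so the IPI must use the copy.
		 */
		cpumask_copy(data->cpumask_ipi, data->cpumask);

		for_each_cpu(cpu, data->cpumask) {
			struct call_single_data *csd = per_cpu_ptr(data->csd, cpu);
			struct call_single_queue *dst =
						&per_cpu(call_single_queue, cpu);
			unsigned long flags;

			csd_lock(csd);
			csd->func = func;
			csd->info = info;

			raw_spin_lock_irqsave(&dst->lock, flags);
			list_add_tail(&csd->list, &dst->list);
			raw_spin_unlock_irqrestore(&dst->lock, flags);
		}

		/* Send a message to all CPUs in the snapshot of the map */
		arch_send_call_function_ipi_mask(data->cpumask_ipi);

		/* Optionally wait for each queued csd to be unlocked by its cpu */
		if (wait) {
			for_each_cpu(cpu, data->cpumask)
				csd_lock_wait(per_cpu_ptr(data->csd, cpu));
		}
	}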