Message-Id: <1464843787-4378-5-git-send-email-xinhui.pan@linux.vnet.ibm.com>
Date:	Thu,  2 Jun 2016 13:03:05 +0800
From:	Pan Xinhui <xinhui.pan@...ux.vnet.ibm.com>
To:	linux-kernel@...r.kernel.org, linuxppc-dev@...ts.ozlabs.org,
	virtualization@...ts.linux-foundation.org
Cc:	benh@...nel.crashing.org, paulus@...ba.org, mpe@...erman.id.au,
	peterz@...radead.org, mingo@...hat.com, paulmck@...ux.vnet.ibm.com,
	waiman.long@....com, Pan Xinhui <xinhui.pan@...ux.vnet.ibm.com>
Subject: [PATCH RESEND v4 4/6] pv-qspinlock: powerpc support pv-qspinlock

A kernel built with pv-qspinlock must also run on environments that have
no PowerVM, so the qspinlock implementation has to be chosen at runtime.

By default pv-qspinlock uses the native version. pv_lock initialization
is done at boot time with interrupts disabled, and if we are running
under PHYP the pv_lock_ops callbacks are switched to the paravirt
versions.

A hash table, keyed by the lock, stores the CPU number of the lock
holder, so pv_wait can find out who holds a lock by looking it up. The
lock is also stored in a per-cpu struct and removed once we own it;
pv_wait needs it to know which lock we are spinning on.
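
The two key pieces of the mechanism, condensed from
arch/powerpc/kernel/paravirt.c in the patch below with explanatory
comments added:

	/*
	 * Runtime selection: keep the native callbacks unless we run on
	 * shared processors (PHYP); then set up the lock -> holder-cpu
	 * hash and switch to the pv versions.
	 */
	void __init pv_lock_init(void)
	{
		if (SHARED_PROCESSOR) {
			init_hash();
			__pv_init_lock_hash();
			pv_lock_op.lock = __pv_lock;
			pv_lock_op.unlock = __pv_unlock;
			pv_lock_op.wait = __pv_wait;
			pv_lock_op.kick = __pv_kick;
		}
	}

	/*
	 * While waiting, look up the holder of the lock we queued on
	 * (saved per cpu by __pv_lock()) and yield our cycles to it.
	 */
	static void __pv_wait(u8 *ptr, u8 val, int cpu)
	{
		void *l = this_cpu_lock();

		HMT_low();
		while (READ_ONCE(*ptr) == val) {
			cpu = lock_get_holder(l);
			__spin_yield_cpu(cpu);
		}
		HMT_medium();
	}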

Signed-off-by: Pan Xinhui <xinhui.pan@...ux.vnet.ibm.com>
---
 arch/powerpc/include/asm/qspinlock.h               |  15 +++
 arch/powerpc/include/asm/qspinlock_paravirt.h      |  38 +++++++
 .../powerpc/include/asm/qspinlock_paravirt_types.h |  13 +++
 arch/powerpc/kernel/paravirt.c                     | 121 +++++++++++++++++++++
 arch/powerpc/platforms/pseries/setup.c             |   5 +
 5 files changed, 192 insertions(+)
 create mode 100644 arch/powerpc/include/asm/qspinlock_paravirt.h
 create mode 100644 arch/powerpc/include/asm/qspinlock_paravirt_types.h
 create mode 100644 arch/powerpc/kernel/paravirt.c

diff --git a/arch/powerpc/include/asm/qspinlock.h b/arch/powerpc/include/asm/qspinlock.h
index 5883954..35ac65a 100644
--- a/arch/powerpc/include/asm/qspinlock.h
+++ b/arch/powerpc/include/asm/qspinlock.h
@@ -12,10 +12,25 @@ static inline void native_queued_spin_unlock(struct qspinlock *lock)
 	smp_store_release((u8 *)lock, 0);
 }
 
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+
+#include <asm/qspinlock_paravirt.h>
+
+static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+{
+	pv_queued_spin_lock(lock, val);
+}
+
+static inline void queued_spin_unlock(struct qspinlock *lock)
+{
+	pv_queued_spin_unlock(lock);
+}
+#else
 static inline void queued_spin_unlock(struct qspinlock *lock)
 {
 	native_queued_spin_unlock(lock);
 }
+#endif
 
 #include <asm-generic/qspinlock.h>
 
diff --git a/arch/powerpc/include/asm/qspinlock_paravirt.h b/arch/powerpc/include/asm/qspinlock_paravirt.h
new file mode 100644
index 0000000..cd17a79
--- /dev/null
+++ b/arch/powerpc/include/asm/qspinlock_paravirt.h
@@ -0,0 +1,38 @@
+#ifndef CONFIG_PARAVIRT_SPINLOCKS
+#error "do not include this file"
+#endif
+
+#ifndef _ASM_QSPINLOCK_PARAVIRT_H
+#define _ASM_QSPINLOCK_PARAVIRT_H
+
+#include <asm/qspinlock_paravirt_types.h>
+
+extern void pv_lock_init(void);
+extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_init_lock_hash(void);
+extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_queued_spin_unlock(struct qspinlock *lock);
+
+static inline void pv_queued_spin_lock(struct qspinlock *lock, u32 val)
+{
+	CLEAR_IO_SYNC;
+	pv_lock_op.lock(lock, val);
+}
+
+static inline void pv_queued_spin_unlock(struct qspinlock *lock)
+{
+	SYNC_IO;
+	pv_lock_op.unlock(lock);
+}
+
+static inline void pv_wait(u8 *ptr, u8 val, int lockcpu)
+{
+	pv_lock_op.wait(ptr, val, lockcpu);
+}
+
+static inline void pv_kick(int cpu)
+{
+	pv_lock_op.kick(cpu);
+}
+
+#endif
diff --git a/arch/powerpc/include/asm/qspinlock_paravirt_types.h b/arch/powerpc/include/asm/qspinlock_paravirt_types.h
new file mode 100644
index 0000000..e1fdeb0
--- /dev/null
+++ b/arch/powerpc/include/asm/qspinlock_paravirt_types.h
@@ -0,0 +1,13 @@
+#ifndef _ASM_QSPINLOCK_PARAVIRT_TYPES_H
+#define _ASM_QSPINLOCK_PARAVIRT_TYPES_H
+
+struct pv_lock_ops {
+	void (*lock)(struct qspinlock *lock, u32 val);
+	void (*unlock)(struct qspinlock *lock);
+	void (*wait)(u8 *ptr, u8 val, int cpu);
+	void (*kick)(int cpu);
+};
+
+extern struct pv_lock_ops pv_lock_op;
+
+#endif
diff --git a/arch/powerpc/kernel/paravirt.c b/arch/powerpc/kernel/paravirt.c
new file mode 100644
index 0000000..2e87fa6
--- /dev/null
+++ b/arch/powerpc/kernel/paravirt.c
@@ -0,0 +1,121 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/hash.h>
+#include <linux/bootmem.h>
+
+#define NUM_LOCK_CPU_ENTRY_SHIFT 16
+#define NUM_LOCK_CPU_ENTRY (1 << NUM_LOCK_CPU_ENTRY_SHIFT)
+#define NUM_LOCKS_PER_CPU 4
+
+static u16 *hash_lock_cpu_ptr;
+
+struct locks_on_cpu {
+	void *l[NUM_LOCKS_PER_CPU];
+	int count;
+};
+
+static DEFINE_PER_CPU(struct locks_on_cpu, node);
+
+static u16 *hash(void *l)
+{
+	int val = hash_ptr(l, NUM_LOCK_CPU_ENTRY_SHIFT);
+
+	return &hash_lock_cpu_ptr[val];
+}
+
+static void __init init_hash(void)
+{
+	int size = NUM_LOCK_CPU_ENTRY * sizeof(*hash_lock_cpu_ptr);
+
+	hash_lock_cpu_ptr = memblock_virt_alloc(size, 0);
+	memset(hash_lock_cpu_ptr, 0, size);
+}
+
+#define lock_get_holder(l)	\
+		((int)*hash(l) - 1)
+
+#define lock_set_holder(l)	\
+		(*hash(l) = raw_smp_processor_id() + 1)
+
+static void *this_cpu_lock(void)
+{
+	struct locks_on_cpu *this_node = this_cpu_ptr(&node);
+	int i = this_node->count - 1;
+
+	return this_node->l[i];
+}
+
+static void cpu_save_lock(void *l)
+{
+	struct locks_on_cpu *this_node = this_cpu_ptr(&node);
+	int i = this_node->count++;
+
+	this_node->l[i] = l;
+}
+
+static void cpu_remove_lock(void *l)
+{
+	this_cpu_dec(node.count);
+}
+
+static void __native_queued_spin_unlock(struct qspinlock *lock)
+{
+	native_queued_spin_unlock(lock);
+}
+
+static void __pv_lock(struct qspinlock *lock, u32 val)
+{
+	cpu_save_lock(lock);
+	__pv_queued_spin_lock_slowpath(lock, val);
+	cpu_remove_lock(lock);
+	lock_set_holder(lock);
+}
+
+static void __pv_unlock(struct qspinlock *lock)
+{
+	__pv_queued_spin_unlock(lock);
+}
+
+static void __pv_wait(u8 *ptr, u8 val, int cpu)
+{
+	void *l = this_cpu_lock();
+
+	HMT_low();
+	while (READ_ONCE(*ptr) == val) {
+		cpu = lock_get_holder(l);
+		__spin_yield_cpu(cpu);
+	}
+	HMT_medium();
+}
+
+static void __pv_kick(int cpu)
+{
+	__spin_wake_cpu(cpu);
+}
+
+struct pv_lock_ops pv_lock_op = {
+	.lock = native_queued_spin_lock_slowpath,
+	.unlock = __native_queued_spin_unlock,
+	.wait = NULL,
+	.kick = NULL,
+};
+EXPORT_SYMBOL(pv_lock_op);
+
+void __init pv_lock_init(void)
+{
+	if (SHARED_PROCESSOR) {
+		init_hash();
+		__pv_init_lock_hash();
+		pv_lock_op.lock = __pv_lock;
+		pv_lock_op.unlock = __pv_unlock;
+		pv_lock_op.wait = __pv_wait;
+		pv_lock_op.kick = __pv_kick;
+	}
+}
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 6e944fc..c9f056e 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -547,6 +547,11 @@ static void __init pSeries_setup_arch(void)
 				"%ld\n", rc);
 		}
 	}
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+	pv_lock_init();
+#endif
+
 }
 
 static int __init pSeries_init_panel(void)
-- 
1.9.1
