Date:	Fri, 10 Jun 2016 18:31:22 +0200
From:	Sebastian Andrzej Siewior <bigeasy@...utronix.de>
To:	Thomas Gleixner <tglx@...utronix.de>
Cc:	LKML <linux-kernel@...r.kernel.org>,
	linux-rt-users <linux-rt-users@...r.kernel.org>,
	Steven Rostedt <rostedt@...dmis.org>
Subject: [ANNOUNCE] 4.6.2-rt5

Dear RT folks!

I'm pleased to announce the v4.6.2-rt5 patch set. 

Changes since v4.6.2-rt4:

  - "Schedule while atomic" fixup in cgroup / memcontrol. Patch by Mike
    Galbraith.

  - Rename of "work-simple" to "swork" to align with "swait". Patch by
    Mike Galbraith.
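
    For illustration, a minimal (hypothetical) user of the API: the
    foo_* names below are made up, only the swork_* calls are the ones
    added in kernel/sched/swork.c further down.

	#include <linux/init.h>
	#include <linux/interrupt.h>
	#include <linux/module.h>
	#include <linux/swork.h>

	static struct swork_event foo_event;

	/* Runs in the "kswork" kthread, where sleeping is allowed. */
	static void foo_handle_event(struct swork_event *sev)
	{
		/* ... process the event, may take sleeping locks ... */
	}

	/* Would be hooked up via request_irq(); queueing is safe from
	 * hard IRQ context. swork_queue() returns false if the event
	 * is already pending. */
	static irqreturn_t foo_irq(int irq, void *dev_id)
	{
		swork_queue(&foo_event);
		return IRQ_HANDLED;
	}

	static int __init foo_init(void)
	{
		int ret;

		/* Take a reference on the shared "kswork" thread. */
		ret = swork_get();
		if (ret)
			return ret;
		INIT_SWORK(&foo_event, foo_handle_event);
		return 0;
	}

	static void __exit foo_exit(void)
	{
		/* The last swork_put() stops and frees the worker, so
		 * drop the reference only after all queued events have
		 * completed. */
		swork_put();
	}

	module_init(foo_init);
	module_exit(foo_exit);
	MODULE_LICENSE("GPL");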

Known issues
	- CPU hotplug got a little better but can still deadlock.

The delta patch against 4.6.2-rt4 is appended below and can be found here:
 
     https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.6/incr/patch-4.6.2-rt4-rt5.patch.xz

You can get this release via the git tree at:

    git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.6.2-rt5

The RT patch against 4.6.2 can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.6/patch-4.6.2-rt5.patch.xz

The split quilt queue is available at:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.6/patches-4.6.2-rt5.tar.xz

Sebastian

diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 48aec5d83900..f3bd7b8302ef 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -42,7 +42,7 @@
 #include <linux/irq_work.h>
 #include <linux/export.h>
 #include <linux/jiffies.h>
-#include <linux/work-simple.h>
+#include <linux/swork.h>
 
 #include <asm/processor.h>
 #include <asm/traps.h>
diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
index a774f0c8d22b..e03fa17b8670 100644
--- a/drivers/thermal/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/x86_pkg_temp_thermal.c
@@ -29,7 +29,7 @@
 #include <linux/pm.h>
 #include <linux/thermal.h>
 #include <linux/debugfs.h>
-#include <linux/work-simple.h>
+#include <linux/swork.h>
 #include <asm/cpu_device_id.h>
 #include <asm/mce.h>
 
diff --git a/fs/aio.c b/fs/aio.c
index 14af01540288..dd8d6f234a0b 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -40,7 +40,7 @@
 #include <linux/ramfs.h>
 #include <linux/percpu-refcount.h>
 #include <linux/mount.h>
-#include <linux/work-simple.h>
+#include <linux/swork.h>
 
 #include <asm/kmap_types.h>
 #include <asm/uaccess.h>
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index c311309f7da9..56027cc01a56 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -16,7 +16,7 @@
 #include <linux/percpu-refcount.h>
 #include <linux/percpu-rwsem.h>
 #include <linux/workqueue.h>
-#include <linux/work-simple.h>
+#include <linux/swork.h>
 
 #ifdef CONFIG_CGROUPS
 
diff --git a/include/linux/swork.h b/include/linux/swork.h
new file mode 100644
index 000000000000..f175fa9a6016
--- /dev/null
+++ b/include/linux/swork.h
@@ -0,0 +1,24 @@
+#ifndef _LINUX_SWORK_H
+#define _LINUX_SWORK_H
+
+#include <linux/list.h>
+
+struct swork_event {
+	struct list_head item;
+	unsigned long flags;
+	void (*func)(struct swork_event *);
+};
+
+static inline void INIT_SWORK(struct swork_event *event,
+			      void (*func)(struct swork_event *))
+{
+	event->flags = 0;
+	event->func = func;
+}
+
+bool swork_queue(struct swork_event *sev);
+
+int swork_get(void);
+void swork_put(void);
+
+#endif /* _LINUX_SWORK_H */
diff --git a/include/linux/work-simple.h b/include/linux/work-simple.h
deleted file mode 100644
index f175fa9a6016..000000000000
--- a/include/linux/work-simple.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef _LINUX_SWORK_H
-#define _LINUX_SWORK_H
-
-#include <linux/list.h>
-
-struct swork_event {
-	struct list_head item;
-	unsigned long flags;
-	void (*func)(struct swork_event *);
-};
-
-static inline void INIT_SWORK(struct swork_event *event,
-			      void (*func)(struct swork_event *))
-{
-	event->flags = 0;
-	event->func = func;
-}
-
-bool swork_queue(struct swork_event *sev);
-
-int swork_get(void);
-void swork_put(void);
-
-#endif /* _LINUX_SWORK_H */
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index b9e72e2fa9b7..5aaddf5838e9 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -17,7 +17,7 @@ endif
 
 obj-y += core.o loadavg.o clock.o cputime.o
 obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
-obj-y += wait.o swait.o work-simple.o completion.o idle.o
+obj-y += wait.o swait.o swork.o completion.o idle.o
 obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o
 obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
 obj-$(CONFIG_SCHEDSTATS) += stats.o
diff --git a/kernel/sched/swork.c b/kernel/sched/swork.c
new file mode 100644
index 000000000000..1950f40ca725
--- /dev/null
+++ b/kernel/sched/swork.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2014 BMW Car IT GmbH, Daniel Wagner daniel.wagner@...-carit.de
+ *
+ * Provides a PREEMPT_RT_FULL-safe framework for enqueuing callbacks
+ * from IRQ context. The callbacks are executed in kthread context.
+ */
+
+#include <linux/swait.h>
+#include <linux/swork.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/export.h>
+
+#define SWORK_EVENT_PENDING     (1 << 0)
+
+static DEFINE_MUTEX(worker_mutex);
+static struct sworker *glob_worker;
+
+struct sworker {
+	struct list_head events;
+	struct swait_queue_head wq;
+
+	raw_spinlock_t lock;
+
+	struct task_struct *task;
+	int refs;
+};
+
+static bool swork_readable(struct sworker *worker)
+{
+	bool r;
+
+	if (kthread_should_stop())
+		return true;
+
+	raw_spin_lock_irq(&worker->lock);
+	r = !list_empty(&worker->events);
+	raw_spin_unlock_irq(&worker->lock);
+
+	return r;
+}
+
+static int swork_kthread(void *arg)
+{
+	struct sworker *worker = arg;
+
+	for (;;) {
+		swait_event_interruptible(worker->wq,
+					swork_readable(worker));
+		if (kthread_should_stop())
+			break;
+
+		raw_spin_lock_irq(&worker->lock);
+		while (!list_empty(&worker->events)) {
+			struct swork_event *sev;
+
+			sev = list_first_entry(&worker->events,
+					struct swork_event, item);
+			list_del(&sev->item);
+			raw_spin_unlock_irq(&worker->lock);
+
+			WARN_ON_ONCE(!test_and_clear_bit(SWORK_EVENT_PENDING,
+							 &sev->flags));
+			sev->func(sev);
+			raw_spin_lock_irq(&worker->lock);
+		}
+		raw_spin_unlock_irq(&worker->lock);
+	}
+	return 0;
+}
+
+static struct sworker *swork_create(void)
+{
+	struct sworker *worker;
+
+	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
+	if (!worker)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&worker->events);
+	raw_spin_lock_init(&worker->lock);
+	init_swait_queue_head(&worker->wq);
+
+	worker->task = kthread_run(swork_kthread, worker, "kswork");
+	if (IS_ERR(worker->task)) {
+		kfree(worker);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	return worker;
+}
+
+static void swork_destroy(struct sworker *worker)
+{
+	kthread_stop(worker->task);
+
+	WARN_ON(!list_empty(&worker->events));
+	kfree(worker);
+}
+
+/**
+ * swork_queue - queue swork
+ *
+ * Returns %false if @work was already on a queue, %true otherwise.
+ *
+ * The work is queued and processed on a random CPU
+ */
+bool swork_queue(struct swork_event *sev)
+{
+	unsigned long flags;
+
+	if (test_and_set_bit(SWORK_EVENT_PENDING, &sev->flags))
+		return false;
+
+	raw_spin_lock_irqsave(&glob_worker->lock, flags);
+	list_add_tail(&sev->item, &glob_worker->events);
+	raw_spin_unlock_irqrestore(&glob_worker->lock, flags);
+
+	swake_up(&glob_worker->wq);
+	return true;
+}
+EXPORT_SYMBOL_GPL(swork_queue);
+
+/**
+ * swork_get - get an instance of the sworker
+ *
+ * Returns a negative error code if initialization of the worker
+ * failed, %0 otherwise.
+ *
+ */
+int swork_get(void)
+{
+	struct sworker *worker;
+
+	mutex_lock(&worker_mutex);
+	if (!glob_worker) {
+		worker = swork_create();
+		if (IS_ERR(worker)) {
+			mutex_unlock(&worker_mutex);
+			return -ENOMEM;
+		}
+
+		glob_worker = worker;
+	}
+
+	glob_worker->refs++;
+	mutex_unlock(&worker_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(swork_get);
+
+/**
+ * swork_put - puts an instance of the sworker
+ *
+ * Will destroy the sworker thread. This function must not be called until all
+ * queued events have been completed.
+ */
+void swork_put(void)
+{
+	mutex_lock(&worker_mutex);
+
+	glob_worker->refs--;
+	if (glob_worker->refs > 0)
+		goto out;
+
+	swork_destroy(glob_worker);
+	glob_worker = NULL;
+out:
+	mutex_unlock(&worker_mutex);
+}
+EXPORT_SYMBOL_GPL(swork_put);
diff --git a/kernel/sched/work-simple.c b/kernel/sched/work-simple.c
deleted file mode 100644
index 9ffe40543c81..000000000000
--- a/kernel/sched/work-simple.c
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * Copyright (C) 2014 BMW Car IT GmbH, Daniel Wagner daniel.wagner@...-carit.de
- *
- * Provides a PREEMPT_RT_FULL-safe framework for enqueuing callbacks
- * from IRQ context. The callbacks are executed in kthread context.
- */
-
-#include <linux/swait.h>
-#include <linux/work-simple.h>
-#include <linux/kthread.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/export.h>
-
-#define SWORK_EVENT_PENDING     (1 << 0)
-
-static DEFINE_MUTEX(worker_mutex);
-static struct sworker *glob_worker;
-
-struct sworker {
-	struct list_head events;
-	struct swait_queue_head wq;
-
-	raw_spinlock_t lock;
-
-	struct task_struct *task;
-	int refs;
-};
-
-static bool swork_readable(struct sworker *worker)
-{
-	bool r;
-
-	if (kthread_should_stop())
-		return true;
-
-	raw_spin_lock_irq(&worker->lock);
-	r = !list_empty(&worker->events);
-	raw_spin_unlock_irq(&worker->lock);
-
-	return r;
-}
-
-static int swork_kthread(void *arg)
-{
-	struct sworker *worker = arg;
-
-	for (;;) {
-		swait_event_interruptible(worker->wq,
-					swork_readable(worker));
-		if (kthread_should_stop())
-			break;
-
-		raw_spin_lock_irq(&worker->lock);
-		while (!list_empty(&worker->events)) {
-			struct swork_event *sev;
-
-			sev = list_first_entry(&worker->events,
-					struct swork_event, item);
-			list_del(&sev->item);
-			raw_spin_unlock_irq(&worker->lock);
-
-			WARN_ON_ONCE(!test_and_clear_bit(SWORK_EVENT_PENDING,
-							 &sev->flags));
-			sev->func(sev);
-			raw_spin_lock_irq(&worker->lock);
-		}
-		raw_spin_unlock_irq(&worker->lock);
-	}
-	return 0;
-}
-
-static struct sworker *swork_create(void)
-{
-	struct sworker *worker;
-
-	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
-	if (!worker)
-		return ERR_PTR(-ENOMEM);
-
-	INIT_LIST_HEAD(&worker->events);
-	raw_spin_lock_init(&worker->lock);
-	init_swait_queue_head(&worker->wq);
-
-	worker->task = kthread_run(swork_kthread, worker, "kswork");
-	if (IS_ERR(worker->task)) {
-		kfree(worker);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	return worker;
-}
-
-static void swork_destroy(struct sworker *worker)
-{
-	kthread_stop(worker->task);
-
-	WARN_ON(!list_empty(&worker->events));
-	kfree(worker);
-}
-
-/**
- * swork_queue - queue swork
- *
- * Returns %false if @work was already on a queue, %true otherwise.
- *
- * The work is queued and processed on a random CPU
- */
-bool swork_queue(struct swork_event *sev)
-{
-	unsigned long flags;
-
-	if (test_and_set_bit(SWORK_EVENT_PENDING, &sev->flags))
-		return false;
-
-	raw_spin_lock_irqsave(&glob_worker->lock, flags);
-	list_add_tail(&sev->item, &glob_worker->events);
-	raw_spin_unlock_irqrestore(&glob_worker->lock, flags);
-
-	swake_up(&glob_worker->wq);
-	return true;
-}
-EXPORT_SYMBOL_GPL(swork_queue);
-
-/**
- * swork_get - get an instance of the sworker
- *
- * Returns a negative error code if initialization of the worker
- * failed, %0 otherwise.
- *
- */
-int swork_get(void)
-{
-	struct sworker *worker;
-
-	mutex_lock(&worker_mutex);
-	if (!glob_worker) {
-		worker = swork_create();
-		if (IS_ERR(worker)) {
-			mutex_unlock(&worker_mutex);
-			return -ENOMEM;
-		}
-
-		glob_worker = worker;
-	}
-
-	glob_worker->refs++;
-	mutex_unlock(&worker_mutex);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(swork_get);
-
-/**
- * swork_put - puts an instance of the sworker
- *
- * Will destroy the sworker thread. This function must not be called until all
- * queued events have been completed.
- */
-void swork_put(void)
-{
-	mutex_lock(&worker_mutex);
-
-	glob_worker->refs--;
-	if (glob_worker->refs > 0)
-		goto out;
-
-	swork_destroy(glob_worker);
-	glob_worker = NULL;
-out:
-	mutex_unlock(&worker_mutex);
-}
-EXPORT_SYMBOL_GPL(swork_put);
diff --git a/localversion-rt b/localversion-rt
index ad3da1bcab7e..0efe7ba1930e 100644
--- a/localversion-rt
+++ b/localversion-rt
@@ -1 +1 @@
--rt4
+-rt5
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 74c500e7ed2a..bd1fab104ace 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5557,10 +5557,10 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
 
 	commit_charge(newpage, memcg, false);
 
-	local_irq_disable();
+	local_lock_irq(event_lock);
 	mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
 	memcg_check_events(memcg, newpage);
-	local_irq_enable();
+	local_unlock_irq(event_lock);
 }
 
 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
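
For reference on the memcontrol fixup above: with interrupts hard-disabled,
mem_cgroup_charge_statistics() / memcg_check_events() can run into sleeping
locks on PREEMPT_RT_FULL, hence the "scheduling while atomic" splat.
local_lock_irq(event_lock) keeps the local_irq_disable() semantics on !RT
kernels but becomes a per-CPU sleeping lock on RT. A sketch of the pattern,
assuming the locallock API carried in this series (update_stats() is a
made-up stand-in for the statistics update):

	#include <linux/locallock.h>

	/* !RT: compiles down to plain IRQ disabling.
	 * RT:  a per-CPU lock that may be preempted/slept under. */
	static DEFINE_LOCAL_IRQ_LOCK(event_lock);

	static void update_stats(void)
	{
		local_lock_irq(event_lock);	/* !RT: local_irq_disable() */
		/* ... per-CPU event statistics ... */
		local_unlock_irq(event_lock);	/* !RT: local_irq_enable() */
	}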
