Message-Id: <159059490324591@kroah.com>
Date: Wed, 27 May 2020 17:55:27 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org, akpm@...ux-foundation.org,
torvalds@...ux-foundation.org, stable@...r.kernel.org
Cc: lwn@....net, jslaby@...e.cz,
Greg Kroah-Hartman <gregkh@...uxfoundation.org>
Subject: Re: Linux 4.4.225
diff --git a/Documentation/networking/l2tp.txt b/Documentation/networking/l2tp.txt
index 4650a00ed012..9bc271cdc9a8 100644
--- a/Documentation/networking/l2tp.txt
+++ b/Documentation/networking/l2tp.txt
@@ -177,10 +177,10 @@ setsockopt on the PPPoX socket to set a debug mask.
The following debug mask bits are available:
-PPPOL2TP_MSG_DEBUG verbose debug (if compiled in)
-PPPOL2TP_MSG_CONTROL userspace - kernel interface
-PPPOL2TP_MSG_SEQ sequence numbers handling
-PPPOL2TP_MSG_DATA data packets
+L2TP_MSG_DEBUG verbose debug (if compiled in)
+L2TP_MSG_CONTROL userspace - kernel interface
+L2TP_MSG_SEQ sequence numbers handling
+L2TP_MSG_DATA data packets
If enabled, files under a l2tp debugfs directory can be used to dump
kernel state about L2TP tunnels and sessions. To access it, the
diff --git a/Makefile b/Makefile
index f381af71fa32..4e5362707405 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
-SUBLEVEL = 224
+SUBLEVEL = 225
EXTRAVERSION =
NAME = Blurry Fish Butt
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index cc414382dab4..561b2ba6bc28 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -162,8 +162,13 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
preempt_enable();
#endif
- if (!ret)
- *oval = oldval;
+ /*
+ * Store unconditionally. If ret != 0 the extra store is the least
+ * of the worries but GCC cannot figure out that __futex_atomic_op()
+ * is either setting ret to -EFAULT or storing the old value in
+ * oldval which results in an uninitialized warning at the call site.
+ */
+ *oval = oldval;
return ret;
}
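The comment added above refers to a -Wmaybe-uninitialized false positive. A
minimal reduction of the pattern outside the kernel (illustrative only,
hypothetical names; whether GCC actually warns depends on inlining and
optimization level) looks like this:

	/* build with: gcc -O2 -Wmaybe-uninitialized -c reduction.c */
	static int get_old(int *old, int fail)
	{
		if (fail)
			return -1;	/* error path: *old is never written */
		*old = 42;
		return 0;
	}

	int caller(int fail, int cmp)
	{
		int old, ret;

		ret = get_old(&old, fail);
		/* GCC cannot always prove get_old() stored to 'old' whenever
		 * ret == 0, so it may flag 'old' as possibly uninitialized.
		 */
		if (!ret && old == cmp)
			return 1;
		return 0;
	}

Storing through the pointer unconditionally, as the hunk does, removes the
data-flow ambiguity regardless of what the optimizer can prove.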
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index e1807296a1a0..33d2b5948d7f 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -319,6 +319,7 @@
#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7349 0x7349
#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_73F7 0x73f7
#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001 0xa001
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_C002 0xc002
#define USB_VENDOR_ID_ELAN 0x04f3
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 9de379c1b3fd..56c4a81d3ea2 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1300,6 +1300,9 @@ static const struct hid_device_id mt_devices[] = {
{ .driver_data = MT_CLS_EGALAX_SERIAL,
MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) },
+ { .driver_data = MT_CLS_EGALAX,
+ MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
+ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_C002) },
/* Elitegroup panel */
{ .driver_data = MT_CLS_SERIAL,
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index e56b774e7cf9..7584f292e2fd 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -22,6 +22,7 @@
/* The I2C_RDWR ioctl code is written by Kolja Waschk <waschk@...os.de> */
+#include <linux/cdev.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
@@ -46,10 +47,11 @@
struct i2c_dev {
struct list_head list;
struct i2c_adapter *adap;
- struct device *dev;
+ struct device dev;
+ struct cdev cdev;
};
-#define I2C_MINORS 256
+#define I2C_MINORS MINORMASK
static LIST_HEAD(i2c_dev_list);
static DEFINE_SPINLOCK(i2c_dev_list_lock);
@@ -89,12 +91,14 @@ static struct i2c_dev *get_free_i2c_dev(struct i2c_adapter *adap)
return i2c_dev;
}
-static void return_i2c_dev(struct i2c_dev *i2c_dev)
+static void put_i2c_dev(struct i2c_dev *i2c_dev, bool del_cdev)
{
spin_lock(&i2c_dev_list_lock);
list_del(&i2c_dev->list);
spin_unlock(&i2c_dev_list_lock);
- kfree(i2c_dev);
+ if (del_cdev)
+ cdev_device_del(&i2c_dev->cdev, &i2c_dev->dev);
+ put_device(&i2c_dev->dev);
}
static ssize_t name_show(struct device *dev,
@@ -490,13 +494,8 @@ static int i2cdev_open(struct inode *inode, struct file *file)
unsigned int minor = iminor(inode);
struct i2c_client *client;
struct i2c_adapter *adap;
- struct i2c_dev *i2c_dev;
-
- i2c_dev = i2c_dev_get_by_minor(minor);
- if (!i2c_dev)
- return -ENODEV;
- adap = i2c_get_adapter(i2c_dev->adap->nr);
+ adap = i2c_get_adapter(minor);
if (!adap)
return -ENODEV;
@@ -545,6 +544,14 @@ static const struct file_operations i2cdev_fops = {
static struct class *i2c_dev_class;
+static void i2cdev_dev_release(struct device *dev)
+{
+ struct i2c_dev *i2c_dev;
+
+ i2c_dev = container_of(dev, struct i2c_dev, dev);
+ kfree(i2c_dev);
+}
+
static int i2cdev_attach_adapter(struct device *dev, void *dummy)
{
struct i2c_adapter *adap;
@@ -559,21 +566,25 @@ static int i2cdev_attach_adapter(struct device *dev, void *dummy)
if (IS_ERR(i2c_dev))
return PTR_ERR(i2c_dev);
- /* register this i2c device with the driver core */
- i2c_dev->dev = device_create(i2c_dev_class, &adap->dev,
- MKDEV(I2C_MAJOR, adap->nr), NULL,
- "i2c-%d", adap->nr);
- if (IS_ERR(i2c_dev->dev)) {
- res = PTR_ERR(i2c_dev->dev);
- goto error;
+ cdev_init(&i2c_dev->cdev, &i2cdev_fops);
+ i2c_dev->cdev.owner = THIS_MODULE;
+
+ device_initialize(&i2c_dev->dev);
+ i2c_dev->dev.devt = MKDEV(I2C_MAJOR, adap->nr);
+ i2c_dev->dev.class = i2c_dev_class;
+ i2c_dev->dev.parent = &adap->dev;
+ i2c_dev->dev.release = i2cdev_dev_release;
+ dev_set_name(&i2c_dev->dev, "i2c-%d", adap->nr);
+
+ res = cdev_device_add(&i2c_dev->cdev, &i2c_dev->dev);
+ if (res) {
+ put_i2c_dev(i2c_dev, false);
+ return res;
}
pr_debug("i2c-dev: adapter [%s] registered as minor %d\n",
adap->name, adap->nr);
return 0;
-error:
- return_i2c_dev(i2c_dev);
- return res;
}
static int i2cdev_detach_adapter(struct device *dev, void *dummy)
@@ -589,8 +600,7 @@ static int i2cdev_detach_adapter(struct device *dev, void *dummy)
if (!i2c_dev) /* attach_adapter must have failed */
return 0;
- return_i2c_dev(i2c_dev);
- device_destroy(i2c_dev_class, MKDEV(I2C_MAJOR, adap->nr));
+ put_i2c_dev(i2c_dev, true);
pr_debug("i2c-dev: adapter [%s] unregistered\n", adap->name);
return 0;
@@ -627,7 +637,7 @@ static int __init i2c_dev_init(void)
printk(KERN_INFO "i2c /dev entries driver\n");
- res = register_chrdev(I2C_MAJOR, "i2c", &i2cdev_fops);
+ res = register_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS, "i2c");
if (res)
goto out;
@@ -651,7 +661,7 @@ static int __init i2c_dev_init(void)
out_unreg_class:
class_destroy(i2c_dev_class);
out_unreg_chrdev:
- unregister_chrdev(I2C_MAJOR, "i2c");
+ unregister_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS);
out:
printk(KERN_ERR "%s: Driver Initialisation failed\n", __FILE__);
return res;
@@ -662,7 +672,7 @@ static void __exit i2c_dev_exit(void)
bus_unregister_notifier(&i2c_bus_type, &i2cdev_notifier);
i2c_for_each_dev(NULL, i2cdev_detach_adapter);
class_destroy(i2c_dev_class);
- unregister_chrdev(I2C_MAJOR, "i2c");
+ unregister_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS);
}
MODULE_AUTHOR("Frodo Looijaard <frodol@....nl> and "
diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
index 7b39440192d6..0ca9506f4654 100644
--- a/drivers/media/media-device.c
+++ b/drivers/media/media-device.c
@@ -24,6 +24,7 @@
#include <linux/export.h>
#include <linux/ioctl.h>
#include <linux/media.h>
+#include <linux/slab.h>
#include <linux/types.h>
#include <media/media-device.h>
@@ -234,7 +235,7 @@ static long media_device_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
struct media_devnode *devnode = media_devnode_data(filp);
- struct media_device *dev = to_media_device(devnode);
+ struct media_device *dev = devnode->media_dev;
long ret;
switch (cmd) {
@@ -303,7 +304,7 @@ static long media_device_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
struct media_devnode *devnode = media_devnode_data(filp);
- struct media_device *dev = to_media_device(devnode);
+ struct media_device *dev = devnode->media_dev;
long ret;
switch (cmd) {
@@ -344,7 +345,8 @@ static const struct media_file_operations media_device_fops = {
static ssize_t show_model(struct device *cd,
struct device_attribute *attr, char *buf)
{
- struct media_device *mdev = to_media_device(to_media_devnode(cd));
+ struct media_devnode *devnode = to_media_devnode(cd);
+ struct media_device *mdev = devnode->media_dev;
return sprintf(buf, "%.*s\n", (int)sizeof(mdev->model), mdev->model);
}
@@ -372,6 +374,7 @@ static void media_device_release(struct media_devnode *mdev)
int __must_check __media_device_register(struct media_device *mdev,
struct module *owner)
{
+ struct media_devnode *devnode;
int ret;
if (WARN_ON(mdev->dev == NULL || mdev->model[0] == 0))
@@ -382,17 +385,28 @@ int __must_check __media_device_register(struct media_device *mdev,
spin_lock_init(&mdev->lock);
mutex_init(&mdev->graph_mutex);
+ devnode = kzalloc(sizeof(*devnode), GFP_KERNEL);
+ if (!devnode)
+ return -ENOMEM;
+
/* Register the device node. */
- mdev->devnode.fops = &media_device_fops;
- mdev->devnode.parent = mdev->dev;
- mdev->devnode.release = media_device_release;
- ret = media_devnode_register(&mdev->devnode, owner);
- if (ret < 0)
+ mdev->devnode = devnode;
+ devnode->fops = &media_device_fops;
+ devnode->parent = mdev->dev;
+ devnode->release = media_device_release;
+ ret = media_devnode_register(mdev, devnode, owner);
+ if (ret < 0) {
+ /* devnode free is handled in media_devnode_*() */
+ mdev->devnode = NULL;
return ret;
+ }
- ret = device_create_file(&mdev->devnode.dev, &dev_attr_model);
+ ret = device_create_file(&devnode->dev, &dev_attr_model);
if (ret < 0) {
- media_devnode_unregister(&mdev->devnode);
+ /* devnode free is handled in media_devnode_*() */
+ mdev->devnode = NULL;
+ media_devnode_unregister_prepare(devnode);
+ media_devnode_unregister(devnode);
return ret;
}
@@ -410,11 +424,16 @@ void media_device_unregister(struct media_device *mdev)
struct media_entity *entity;
struct media_entity *next;
+ /* Clear the devnode register bit to avoid races with media dev open */
+ media_devnode_unregister_prepare(mdev->devnode);
+
list_for_each_entry_safe(entity, next, &mdev->entities, list)
media_device_unregister_entity(entity);
- device_remove_file(&mdev->devnode.dev, &dev_attr_model);
- media_devnode_unregister(&mdev->devnode);
+ device_remove_file(&mdev->devnode->dev, &dev_attr_model);
+ media_devnode_unregister(mdev->devnode);
+ /* devnode free is handled in media_devnode_*() */
+ mdev->devnode = NULL;
}
EXPORT_SYMBOL_GPL(media_device_unregister);
diff --git a/drivers/media/media-devnode.c b/drivers/media/media-devnode.c
index ebf9626e5ae5..e887120d19aa 100644
--- a/drivers/media/media-devnode.c
+++ b/drivers/media/media-devnode.c
@@ -44,6 +44,7 @@
#include <linux/uaccess.h>
#include <media/media-devnode.h>
+#include <media/media-device.h>
#define MEDIA_NUM_DEVICES 256
#define MEDIA_NAME "media"
@@ -59,21 +60,19 @@ static DECLARE_BITMAP(media_devnode_nums, MEDIA_NUM_DEVICES);
/* Called when the last user of the media device exits. */
static void media_devnode_release(struct device *cd)
{
- struct media_devnode *mdev = to_media_devnode(cd);
+ struct media_devnode *devnode = to_media_devnode(cd);
mutex_lock(&media_devnode_lock);
-
- /* Delete the cdev on this minor as well */
- cdev_del(&mdev->cdev);
-
/* Mark device node number as free */
- clear_bit(mdev->minor, media_devnode_nums);
-
+ clear_bit(devnode->minor, media_devnode_nums);
mutex_unlock(&media_devnode_lock);
/* Release media_devnode and perform other cleanups as needed. */
- if (mdev->release)
- mdev->release(mdev);
+ if (devnode->release)
+ devnode->release(devnode);
+
+ kfree(devnode);
+ pr_debug("%s: Media Devnode Deallocated\n", __func__);
}
static struct bus_type media_bus_type = {
@@ -83,37 +82,37 @@ static struct bus_type media_bus_type = {
static ssize_t media_read(struct file *filp, char __user *buf,
size_t sz, loff_t *off)
{
- struct media_devnode *mdev = media_devnode_data(filp);
+ struct media_devnode *devnode = media_devnode_data(filp);
- if (!mdev->fops->read)
+ if (!devnode->fops->read)
return -EINVAL;
- if (!media_devnode_is_registered(mdev))
+ if (!media_devnode_is_registered(devnode))
return -EIO;
- return mdev->fops->read(filp, buf, sz, off);
+ return devnode->fops->read(filp, buf, sz, off);
}
static ssize_t media_write(struct file *filp, const char __user *buf,
size_t sz, loff_t *off)
{
- struct media_devnode *mdev = media_devnode_data(filp);
+ struct media_devnode *devnode = media_devnode_data(filp);
- if (!mdev->fops->write)
+ if (!devnode->fops->write)
return -EINVAL;
- if (!media_devnode_is_registered(mdev))
+ if (!media_devnode_is_registered(devnode))
return -EIO;
- return mdev->fops->write(filp, buf, sz, off);
+ return devnode->fops->write(filp, buf, sz, off);
}
static unsigned int media_poll(struct file *filp,
struct poll_table_struct *poll)
{
- struct media_devnode *mdev = media_devnode_data(filp);
+ struct media_devnode *devnode = media_devnode_data(filp);
- if (!media_devnode_is_registered(mdev))
+ if (!media_devnode_is_registered(devnode))
return POLLERR | POLLHUP;
- if (!mdev->fops->poll)
+ if (!devnode->fops->poll)
return DEFAULT_POLLMASK;
- return mdev->fops->poll(filp, poll);
+ return devnode->fops->poll(filp, poll);
}
static long
@@ -121,12 +120,12 @@ __media_ioctl(struct file *filp, unsigned int cmd, unsigned long arg,
long (*ioctl_func)(struct file *filp, unsigned int cmd,
unsigned long arg))
{
- struct media_devnode *mdev = media_devnode_data(filp);
+ struct media_devnode *devnode = media_devnode_data(filp);
if (!ioctl_func)
return -ENOTTY;
- if (!media_devnode_is_registered(mdev))
+ if (!media_devnode_is_registered(devnode))
return -EIO;
return ioctl_func(filp, cmd, arg);
@@ -134,9 +133,9 @@ __media_ioctl(struct file *filp, unsigned int cmd, unsigned long arg,
static long media_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
- struct media_devnode *mdev = media_devnode_data(filp);
+ struct media_devnode *devnode = media_devnode_data(filp);
- return __media_ioctl(filp, cmd, arg, mdev->fops->ioctl);
+ return __media_ioctl(filp, cmd, arg, devnode->fops->ioctl);
}
#ifdef CONFIG_COMPAT
@@ -144,9 +143,9 @@ static long media_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
static long media_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
- struct media_devnode *mdev = media_devnode_data(filp);
+ struct media_devnode *devnode = media_devnode_data(filp);
- return __media_ioctl(filp, cmd, arg, mdev->fops->compat_ioctl);
+ return __media_ioctl(filp, cmd, arg, devnode->fops->compat_ioctl);
}
#endif /* CONFIG_COMPAT */
@@ -154,7 +153,7 @@ static long media_compat_ioctl(struct file *filp, unsigned int cmd,
/* Override for the open function */
static int media_open(struct inode *inode, struct file *filp)
{
- struct media_devnode *mdev;
+ struct media_devnode *devnode;
int ret;
/* Check if the media device is available. This needs to be done with
@@ -164,23 +163,24 @@ static int media_open(struct inode *inode, struct file *filp)
* a crash.
*/
mutex_lock(&media_devnode_lock);
- mdev = container_of(inode->i_cdev, struct media_devnode, cdev);
+ devnode = container_of(inode->i_cdev, struct media_devnode, cdev);
/* return ENXIO if the media device has been removed
already or if it is not registered anymore. */
- if (!media_devnode_is_registered(mdev)) {
+ if (!media_devnode_is_registered(devnode)) {
mutex_unlock(&media_devnode_lock);
return -ENXIO;
}
/* and increase the device refcount */
- get_device(&mdev->dev);
+ get_device(&devnode->dev);
mutex_unlock(&media_devnode_lock);
- filp->private_data = mdev;
+ filp->private_data = devnode;
- if (mdev->fops->open) {
- ret = mdev->fops->open(filp);
+ if (devnode->fops->open) {
+ ret = devnode->fops->open(filp);
if (ret) {
- put_device(&mdev->dev);
+ put_device(&devnode->dev);
+ filp->private_data = NULL;
return ret;
}
}
@@ -191,15 +191,18 @@ static int media_open(struct inode *inode, struct file *filp)
/* Override for the release function */
static int media_release(struct inode *inode, struct file *filp)
{
- struct media_devnode *mdev = media_devnode_data(filp);
+ struct media_devnode *devnode = media_devnode_data(filp);
+
+ if (devnode->fops->release)
+ devnode->fops->release(filp);
- if (mdev->fops->release)
- mdev->fops->release(filp);
+ filp->private_data = NULL;
/* decrease the refcount unconditionally since the release()
return value is ignored. */
- put_device(&mdev->dev);
- filp->private_data = NULL;
+ put_device(&devnode->dev);
+
+ pr_debug("%s: Media Release\n", __func__);
return 0;
}
@@ -219,7 +222,8 @@ static const struct file_operations media_devnode_fops = {
/**
* media_devnode_register - register a media device node
- * @mdev: media device node structure we want to register
+ * @media_dev: struct media_device we want to register a device node
+ * @devnode: media device node structure we want to register
*
* The registration code assigns minor numbers and registers the new device node
* with the kernel. An error is returned if no free minor number can be found,
@@ -231,7 +235,8 @@ static const struct file_operations media_devnode_fops = {
* the media_devnode structure is *not* called, so the caller is responsible for
* freeing any data.
*/
-int __must_check media_devnode_register(struct media_devnode *mdev,
+int __must_check media_devnode_register(struct media_device *mdev,
+ struct media_devnode *devnode,
struct module *owner)
{
int minor;
@@ -243,68 +248,89 @@ int __must_check media_devnode_register(struct media_devnode *mdev,
if (minor == MEDIA_NUM_DEVICES) {
mutex_unlock(&media_devnode_lock);
pr_err("could not get a free minor\n");
+ kfree(devnode);
return -ENFILE;
}
set_bit(minor, media_devnode_nums);
mutex_unlock(&media_devnode_lock);
- mdev->minor = minor;
+ devnode->minor = minor;
+ devnode->media_dev = mdev;
+
+ /* Part 1: Initialize dev now to use dev.kobj for cdev.kobj.parent */
+ devnode->dev.bus = &media_bus_type;
+ devnode->dev.devt = MKDEV(MAJOR(media_dev_t), devnode->minor);
+ devnode->dev.release = media_devnode_release;
+ if (devnode->parent)
+ devnode->dev.parent = devnode->parent;
+ dev_set_name(&devnode->dev, "media%d", devnode->minor);
+ device_initialize(&devnode->dev);
/* Part 2: Initialize and register the character device */
- cdev_init(&mdev->cdev, &media_devnode_fops);
- mdev->cdev.owner = owner;
+ cdev_init(&devnode->cdev, &media_devnode_fops);
+ devnode->cdev.owner = owner;
+ devnode->cdev.kobj.parent = &devnode->dev.kobj;
- ret = cdev_add(&mdev->cdev, MKDEV(MAJOR(media_dev_t), mdev->minor), 1);
+ ret = cdev_add(&devnode->cdev, MKDEV(MAJOR(media_dev_t), devnode->minor), 1);
if (ret < 0) {
pr_err("%s: cdev_add failed\n", __func__);
- goto error;
+ goto cdev_add_error;
}
- /* Part 3: Register the media device */
- mdev->dev.bus = &media_bus_type;
- mdev->dev.devt = MKDEV(MAJOR(media_dev_t), mdev->minor);
- mdev->dev.release = media_devnode_release;
- if (mdev->parent)
- mdev->dev.parent = mdev->parent;
- dev_set_name(&mdev->dev, "media%d", mdev->minor);
- ret = device_register(&mdev->dev);
+ /* Part 3: Add the media device */
+ ret = device_add(&devnode->dev);
if (ret < 0) {
- pr_err("%s: device_register failed\n", __func__);
- goto error;
+ pr_err("%s: device_add failed\n", __func__);
+ goto device_add_error;
}
/* Part 4: Activate this minor. The char device can now be used. */
- set_bit(MEDIA_FLAG_REGISTERED, &mdev->flags);
+ set_bit(MEDIA_FLAG_REGISTERED, &devnode->flags);
return 0;
-error:
- cdev_del(&mdev->cdev);
- clear_bit(mdev->minor, media_devnode_nums);
+device_add_error:
+ cdev_del(&devnode->cdev);
+cdev_add_error:
+ mutex_lock(&media_devnode_lock);
+ clear_bit(devnode->minor, media_devnode_nums);
+ devnode->media_dev = NULL;
+ mutex_unlock(&media_devnode_lock);
+
+ put_device(&devnode->dev);
return ret;
}
+void media_devnode_unregister_prepare(struct media_devnode *devnode)
+{
+ /* Check if devnode was ever registered at all */
+ if (!media_devnode_is_registered(devnode))
+ return;
+
+ mutex_lock(&media_devnode_lock);
+ clear_bit(MEDIA_FLAG_REGISTERED, &devnode->flags);
+ mutex_unlock(&media_devnode_lock);
+}
+
/**
* media_devnode_unregister - unregister a media device node
- * @mdev: the device node to unregister
+ * @devnode: the device node to unregister
*
* This unregisters the passed device. Future open calls will be met with
* errors.
*
- * This function can safely be called if the device node has never been
- * registered or has already been unregistered.
+ * Should be called after media_devnode_unregister_prepare()
*/
-void media_devnode_unregister(struct media_devnode *mdev)
+void media_devnode_unregister(struct media_devnode *devnode)
{
- /* Check if mdev was ever registered at all */
- if (!media_devnode_is_registered(mdev))
- return;
-
mutex_lock(&media_devnode_lock);
- clear_bit(MEDIA_FLAG_REGISTERED, &mdev->flags);
+ /* Delete the cdev on this minor as well */
+ cdev_del(&devnode->cdev);
mutex_unlock(&media_devnode_lock);
- device_unregister(&mdev->dev);
+ device_del(&devnode->dev);
+ devnode->media_dev = NULL;
+ put_device(&devnode->dev);
}
/*
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 9cd0268b2767..f353ab569b8e 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -1800,7 +1800,7 @@ static void uvc_delete(struct uvc_device *dev)
if (dev->vdev.dev)
v4l2_device_unregister(&dev->vdev);
#ifdef CONFIG_MEDIA_CONTROLLER
- if (media_devnode_is_registered(&dev->mdev.devnode))
+ if (media_devnode_is_registered(dev->mdev.devnode))
media_device_unregister(&dev->mdev);
#endif
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index df268365e04e..c8e3995b8cb7 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -276,6 +276,7 @@ void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
down_write(&dev->me_clients_rwsem);
me_cl = __mei_me_cl_by_uuid(dev, uuid);
__mei_me_cl_del(dev, me_cl);
+ mei_me_cl_put(me_cl);
up_write(&dev->me_clients_rwsem);
}
@@ -297,6 +298,7 @@ void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id)
down_write(&dev->me_clients_rwsem);
me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id);
__mei_me_cl_del(dev, me_cl);
+ mei_me_cl_put(me_cl);
up_write(&dev->me_clients_rwsem);
}
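Both mei hunks plug the same reference leak: the __mei_me_cl_by_uuid*()
lookups return the client with an extra reference that __mei_me_cl_del()
does not drop. A generic sketch of that lookup-returns-a-reference
convention (hypothetical "item" type, locking omitted; this is not mei
code):

	#include <linux/kref.h>
	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/types.h>

	struct item {
		struct kref ref;	/* initialised to 1 when added to the list */
		struct list_head link;
		u32 id;
	};

	static void item_release(struct kref *ref)
	{
		kfree(container_of(ref, struct item, ref));
	}

	static struct item *item_lookup(struct list_head *head, u32 id)
	{
		struct item *it;

		list_for_each_entry(it, head, link)
			if (it->id == id) {
				kref_get(&it->ref);	/* reference for the caller */
				return it;
			}
		return NULL;
	}

	static void item_remove_by_id(struct list_head *head, u32 id)
	{
		struct item *it = item_lookup(head, id);

		if (!it)
			return;
		list_del(&it->link);
		kref_put(&it->ref, item_release);	/* the list's reference */
		kref_put(&it->ref, item_release);	/* the lookup reference; omitting
							 * this drop is the leak fixed above */
	}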
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 9404f38d9d0d..2cf5c581c7e0 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3296,7 +3296,7 @@ void igb_configure_tx_ring(struct igb_adapter *adapter,
tdba & 0x00000000ffffffffULL);
wr32(E1000_TDBAH(reg_idx), tdba >> 32);
- ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
+ ring->tail = adapter->io_addr + E1000_TDT(reg_idx);
wr32(E1000_TDH(reg_idx), 0);
writel(0, ring->tail);
@@ -3652,7 +3652,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
ring->count * sizeof(union e1000_adv_rx_desc));
/* initialize head and tail */
- ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
+ ring->tail = adapter->io_addr + E1000_RDT(reg_idx);
wr32(E1000_RDH(reg_idx), 0);
writel(0, ring->tail);
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 957234272ef7..727eaf203463 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -443,9 +443,9 @@ static int btt_log_init(struct arena_info *arena)
static int btt_freelist_init(struct arena_info *arena)
{
- int old, new, ret;
+ int new, ret;
u32 i, map_entry;
- struct log_entry log_new, log_old;
+ struct log_entry log_new;
arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry),
GFP_KERNEL);
@@ -453,10 +453,6 @@ static int btt_freelist_init(struct arena_info *arena)
return -ENOMEM;
for (i = 0; i < arena->nfree; i++) {
- old = btt_log_read(arena, i, &log_old, LOG_OLD_ENT);
- if (old < 0)
- return old;
-
new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT);
if (new < 0)
return new;
diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
index f5585740a765..95121bff2d3e 100644
--- a/drivers/platform/x86/alienware-wmi.c
+++ b/drivers/platform/x86/alienware-wmi.c
@@ -449,23 +449,22 @@ static acpi_status alienware_hdmi_command(struct hdmi_args *in_args,
input.length = (acpi_size) sizeof(*in_args);
input.pointer = in_args;
- if (out_data != NULL) {
+ if (out_data) {
output.length = ACPI_ALLOCATE_BUFFER;
output.pointer = NULL;
status = wmi_evaluate_method(WMAX_CONTROL_GUID, 1,
command, &input, &output);
- } else
+ if (ACPI_SUCCESS(status)) {
+ obj = (union acpi_object *)output.pointer;
+ if (obj && obj->type == ACPI_TYPE_INTEGER)
+ *out_data = (u32)obj->integer.value;
+ }
+ kfree(output.pointer);
+ } else {
status = wmi_evaluate_method(WMAX_CONTROL_GUID, 1,
command, &input, NULL);
-
- if (ACPI_SUCCESS(status) && out_data != NULL) {
- obj = (union acpi_object *)output.pointer;
- if (obj && obj->type == ACPI_TYPE_INTEGER)
- *out_data = (u32) obj->integer.value;
}
- kfree(output.pointer);
return status;
-
}
static ssize_t show_hdmi_cable(struct device *dev,
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index cccf250cd1e3..ee64c9512a3a 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -551,9 +551,33 @@ static struct asus_wmi_driver asus_nb_wmi_driver = {
.detect_quirks = asus_nb_wmi_quirks,
};
+static const struct dmi_system_id asus_nb_wmi_blacklist[] __initconst = {
+ {
+ /*
+ * asus-nb-wm adds no functionality. The T100TA has a detachable
+ * USB kbd, so no hotkeys and it has no WMI rfkill; and loading
+ * asus-nb-wm causes the camera LED to turn and _stay_ on.
+ */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TA"),
+ },
+ },
+ {
+ /* The Asus T200TA has the same issue as the T100TA */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T200TA"),
+ },
+ },
+ {} /* Terminating entry */
+};
static int __init asus_nb_wmi_init(void)
{
+ if (dmi_check_system(asus_nb_wmi_blacklist))
+ return -ENODEV;
+
return asus_wmi_register_driver(&asus_nb_wmi_driver);
}
diff --git a/drivers/staging/iio/accel/sca3000_ring.c b/drivers/staging/iio/accel/sca3000_ring.c
index 20b878d35ea2..fc8b6f179ec6 100644
--- a/drivers/staging/iio/accel/sca3000_ring.c
+++ b/drivers/staging/iio/accel/sca3000_ring.c
@@ -56,7 +56,7 @@ static int sca3000_read_data(struct sca3000_state *st,
st->tx[0] = SCA3000_READ_REG(reg_address_high);
ret = spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer));
if (ret) {
- dev_err(get_device(&st->us->dev), "problem reading register");
+ dev_err(&st->us->dev, "problem reading register");
goto error_free_rx;
}
diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
index 8eb7179da342..4a12a3ea3f25 100644
--- a/drivers/staging/iio/resolver/ad2s1210.c
+++ b/drivers/staging/iio/resolver/ad2s1210.c
@@ -125,17 +125,24 @@ static int ad2s1210_config_write(struct ad2s1210_state *st, u8 data)
static int ad2s1210_config_read(struct ad2s1210_state *st,
unsigned char address)
{
- struct spi_transfer xfer = {
- .len = 2,
- .rx_buf = st->rx,
- .tx_buf = st->tx,
+ struct spi_transfer xfers[] = {
+ {
+ .len = 1,
+ .rx_buf = &st->rx[0],
+ .tx_buf = &st->tx[0],
+ .cs_change = 1,
+ }, {
+ .len = 1,
+ .rx_buf = &st->rx[1],
+ .tx_buf = &st->tx[1],
+ },
};
int ret = 0;
ad2s1210_set_mode(MOD_CONFIG, st);
st->tx[0] = address | AD2S1210_MSB_IS_HIGH;
st->tx[1] = AD2S1210_REG_FAULT;
- ret = spi_sync_transfer(st->sdev, &xfer, 1);
+ ret = spi_sync_transfer(st->sdev, xfers, 2);
if (ret < 0)
return ret;
st->old_data = true;
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 747343c61398..f083ecfddd1b 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1080,11 +1080,11 @@ void usb_disable_endpoint(struct usb_device *dev, unsigned int epaddr,
if (usb_endpoint_out(epaddr)) {
ep = dev->ep_out[epnum];
- if (reset_hardware)
+ if (reset_hardware && epnum != 0)
dev->ep_out[epnum] = NULL;
} else {
ep = dev->ep_in[epnum];
- if (reset_hardware)
+ if (reset_hardware && epnum != 0)
dev->ep_in[epnum] = NULL;
}
if (ep) {
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index efdf81ea3b5f..3d0497421e62 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -3293,6 +3293,7 @@ retry:
WARN_ON(1);
tsession = NULL;
target = -1;
+ mutex_lock(&session->s_mutex);
}
goto retry;
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 53679716baca..18b9213ce0bd 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -139,31 +139,26 @@ static __le32 ext4_xattr_block_csum(struct inode *inode,
}
static int ext4_xattr_block_csum_verify(struct inode *inode,
- sector_t block_nr,
- struct ext4_xattr_header *hdr)
+ struct buffer_head *bh)
{
- if (ext4_has_metadata_csum(inode->i_sb) &&
- (hdr->h_checksum != ext4_xattr_block_csum(inode, block_nr, hdr)))
- return 0;
- return 1;
-}
-
-static void ext4_xattr_block_csum_set(struct inode *inode,
- sector_t block_nr,
- struct ext4_xattr_header *hdr)
-{
- if (!ext4_has_metadata_csum(inode->i_sb))
- return;
+ struct ext4_xattr_header *hdr = BHDR(bh);
+ int ret = 1;
- hdr->h_checksum = ext4_xattr_block_csum(inode, block_nr, hdr);
+ if (ext4_has_metadata_csum(inode->i_sb)) {
+ lock_buffer(bh);
+ ret = (hdr->h_checksum == ext4_xattr_block_csum(inode,
+ bh->b_blocknr, hdr));
+ unlock_buffer(bh);
+ }
+ return ret;
}
-static inline int ext4_handle_dirty_xattr_block(handle_t *handle,
- struct inode *inode,
- struct buffer_head *bh)
+static void ext4_xattr_block_csum_set(struct inode *inode,
+ struct buffer_head *bh)
{
- ext4_xattr_block_csum_set(inode, bh->b_blocknr, BHDR(bh));
- return ext4_handle_dirty_metadata(handle, inode, bh);
+ if (ext4_has_metadata_csum(inode->i_sb))
+ BHDR(bh)->h_checksum = ext4_xattr_block_csum(inode,
+ bh->b_blocknr, BHDR(bh));
}
static inline const struct xattr_handler *
@@ -226,7 +221,7 @@ ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
if (buffer_verified(bh))
return 0;
- if (!ext4_xattr_block_csum_verify(inode, bh->b_blocknr, BHDR(bh)))
+ if (!ext4_xattr_block_csum_verify(inode, bh))
return -EFSBADCRC;
error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size,
bh->b_data);
@@ -590,23 +585,23 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
le32_add_cpu(&BHDR(bh)->h_refcount, -1);
if (ce)
mb_cache_entry_release(ce);
+
+ ext4_xattr_block_csum_set(inode, bh);
/*
* Beware of this ugliness: Releasing of xattr block references
* from different inodes can race and so we have to protect
* from a race where someone else frees the block (and releases
* its journal_head) before we are done dirtying the buffer. In
* nojournal mode this race is harmless and we actually cannot
- * call ext4_handle_dirty_xattr_block() with locked buffer as
+ * call ext4_handle_dirty_metadata() with locked buffer as
* that function can call sync_dirty_buffer() so for that case
* we handle the dirtying after unlocking the buffer.
*/
if (ext4_handle_valid(handle))
- error = ext4_handle_dirty_xattr_block(handle, inode,
- bh);
+ error = ext4_handle_dirty_metadata(handle, inode, bh);
unlock_buffer(bh);
if (!ext4_handle_valid(handle))
- error = ext4_handle_dirty_xattr_block(handle, inode,
- bh);
+ error = ext4_handle_dirty_metadata(handle, inode, bh);
if (IS_SYNC(inode))
ext4_handle_sync(handle);
dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1));
@@ -837,13 +832,14 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
ext4_xattr_rehash(header(s->base),
s->here);
}
+ ext4_xattr_block_csum_set(inode, bs->bh);
unlock_buffer(bs->bh);
if (error == -EFSCORRUPTED)
goto bad_block;
if (!error)
- error = ext4_handle_dirty_xattr_block(handle,
- inode,
- bs->bh);
+ error = ext4_handle_dirty_metadata(handle,
+ inode,
+ bs->bh);
if (error)
goto cleanup;
goto inserted;
@@ -912,10 +908,11 @@ inserted:
le32_add_cpu(&BHDR(new_bh)->h_refcount, 1);
ea_bdebug(new_bh, "reusing; refcount now=%d",
le32_to_cpu(BHDR(new_bh)->h_refcount));
+ ext4_xattr_block_csum_set(inode, new_bh);
unlock_buffer(new_bh);
- error = ext4_handle_dirty_xattr_block(handle,
- inode,
- new_bh);
+ error = ext4_handle_dirty_metadata(handle,
+ inode,
+ new_bh);
if (error)
goto cleanup_dquot;
}
@@ -965,11 +962,12 @@ getblk_failed:
goto getblk_failed;
}
memcpy(new_bh->b_data, s->base, new_bh->b_size);
+ ext4_xattr_block_csum_set(inode, new_bh);
set_buffer_uptodate(new_bh);
unlock_buffer(new_bh);
ext4_xattr_cache_insert(ext4_mb_cache, new_bh);
- error = ext4_handle_dirty_xattr_block(handle,
- inode, new_bh);
+ error = ext4_handle_dirty_metadata(handle, inode,
+ new_bh);
if (error)
goto cleanup;
}
diff --git a/fs/file.c b/fs/file.c
index 7e9eb65a2912..090015401c55 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -88,7 +88,7 @@ static void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
*/
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
- unsigned int cpy, set;
+ size_t cpy, set;
BUG_ON(nfdt->max_fds < ofdt->max_fds);
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index f80ffccb0316..1eb737c466dd 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -541,9 +541,6 @@ __acquires(&gl->gl_lockref.lock)
goto out_unlock;
if (nonblock)
goto out_sched;
- smp_mb();
- if (atomic_read(&gl->gl_revokes) != 0)
- goto out_sched;
set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
gl->gl_target = gl->gl_demote_state;
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index bb3a4bb35183..f0a3fc723ae4 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -160,6 +160,8 @@ static inline unsigned int cpumask_local_spread(unsigned int i, int node)
for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
#define for_each_cpu_not(cpu, mask) \
for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
+#define for_each_cpu_wrap(cpu, mask, start) \
+ for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)(start))
#define for_each_cpu_and(cpu, mask, and) \
for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and)
#else
@@ -232,6 +234,23 @@ unsigned int cpumask_local_spread(unsigned int i, int node);
(cpu) = cpumask_next_zero((cpu), (mask)), \
(cpu) < nr_cpu_ids;)
+extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);
+
+/**
+ * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask: the cpumask pointer
+ * @start: the start location
+ *
+ * The implementation does not assume any bit in @mask is set (including @start).
+ *
+ * After the loop, cpu is >= nr_cpu_ids.
+ */
+#define for_each_cpu_wrap(cpu, mask, start) \
+ for ((cpu) = cpumask_next_wrap((start)-1, (mask), (start), false); \
+ (cpu) < nr_cpumask_bits; \
+ (cpu) = cpumask_next_wrap((cpu), (mask), (start), true))
+
/**
* for_each_cpu_and - iterate over every cpu in both masks
* @cpu: the (optionally unsigned) integer iterator
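For reference, a minimal sketch of how a caller might use the new iterator
(hypothetical helper, not taken from this patch): it visits every CPU set
in the mask exactly once, beginning at the given start point and wrapping
past the end of the mask.

	#include <linux/cpumask.h>

	/* Return the first allowed CPU at or after 'prev + 1', wrapping
	 * around to the start of the mask; -1 if the mask is empty.
	 */
	static int pick_next_cpu(const struct cpumask *allowed, int prev)
	{
		int cpu;

		for_each_cpu_wrap(cpu, allowed, prev + 1)
			return cpu;

		return -1;
	}

kernel/padata.c below relies on the same cpumask_next_wrap() helper to
advance pd->cpu round-robin over the parallel cpumask.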
diff --git a/include/linux/net.h b/include/linux/net.h
index c00b8d182226..6de18ead3dfe 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -291,6 +291,9 @@ int kernel_sendpage(struct socket *sock, struct page *page, int offset,
int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg);
int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how);
+/* Routine returns the IP overhead imposed by a (caller-protected) socket. */
+u32 kernel_sock_ip_overhead(struct sock *sk);
+
#define MODULE_ALIAS_NETPROTO(proto) \
MODULE_ALIAS("net-pf-" __stringify(proto))
diff --git a/include/linux/padata.h b/include/linux/padata.h
index 438694650471..547a8d1e4a3b 100644
--- a/include/linux/padata.h
+++ b/include/linux/padata.h
@@ -24,7 +24,6 @@
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/list.h>
-#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/kobject.h>
@@ -37,6 +36,7 @@
* @list: List entry, to attach to the padata lists.
* @pd: Pointer to the internal control structure.
* @cb_cpu: Callback cpu for serializatioon.
+ * @cpu: Cpu for parallelization.
* @seq_nr: Sequence number of the parallelized data object.
* @info: Used to pass information from the parallel to the serial function.
* @parallel: Parallel execution function.
@@ -46,6 +46,7 @@ struct padata_priv {
struct list_head list;
struct parallel_data *pd;
int cb_cpu;
+ int cpu;
int info;
void (*parallel)(struct padata_priv *padata);
void (*serial)(struct padata_priv *padata);
@@ -83,7 +84,6 @@ struct padata_serial_queue {
* @serial: List to wait for serialization after reordering.
* @pwork: work struct for parallelization.
* @swork: work struct for serialization.
- * @pd: Backpointer to the internal control structure.
* @work: work struct for parallelization.
* @num_obj: Number of objects that are processed by this cpu.
* @cpu_index: Index of the cpu.
@@ -91,7 +91,6 @@ struct padata_serial_queue {
struct padata_parallel_queue {
struct padata_list parallel;
struct padata_list reorder;
- struct parallel_data *pd;
struct work_struct work;
atomic_t num_obj;
int cpu_index;
@@ -118,10 +117,10 @@ struct padata_cpumask {
* @reorder_objects: Number of objects waiting in the reorder queues.
* @refcnt: Number of objects holding a reference on this parallel_data.
* @max_seq_nr: Maximal used sequence number.
+ * @cpu: Next CPU to be processed.
* @cpumask: The cpumasks in use for parallel and serial workers.
+ * @reorder_work: work struct for reordering.
* @lock: Reorder lock.
- * @processed: Number of already processed objects.
- * @timer: Reorder timer.
*/
struct parallel_data {
struct padata_instance *pinst;
@@ -130,10 +129,10 @@ struct parallel_data {
atomic_t reorder_objects;
atomic_t refcnt;
atomic_t seq_nr;
+ int cpu;
struct padata_cpumask cpumask;
+ struct work_struct reorder_work;
spinlock_t lock ____cacheline_aligned;
- unsigned int processed;
- struct timer_list timer;
};
/**
diff --git a/include/media/media-device.h b/include/media/media-device.h
index 6e6db78f1ee2..00bbd679864a 100644
--- a/include/media/media-device.h
+++ b/include/media/media-device.h
@@ -60,7 +60,7 @@ struct device;
struct media_device {
/* dev->driver_data points to this struct. */
struct device *dev;
- struct media_devnode devnode;
+ struct media_devnode *devnode;
char model[32];
char serial[40];
@@ -84,9 +84,6 @@ struct media_device {
#define MEDIA_DEV_NOTIFY_PRE_LINK_CH 0
#define MEDIA_DEV_NOTIFY_POST_LINK_CH 1
-/* media_devnode to media_device */
-#define to_media_device(node) container_of(node, struct media_device, devnode)
-
int __must_check __media_device_register(struct media_device *mdev,
struct module *owner);
#define media_device_register(mdev) __media_device_register(mdev, THIS_MODULE)
diff --git a/include/media/media-devnode.h b/include/media/media-devnode.h
index 17ddae32060d..d5ff95bf2d4b 100644
--- a/include/media/media-devnode.h
+++ b/include/media/media-devnode.h
@@ -33,6 +33,8 @@
#include <linux/device.h>
#include <linux/cdev.h>
+struct media_device;
+
/*
* Flag to mark the media_devnode struct as registered. Drivers must not touch
* this flag directly, it will be set and cleared by media_devnode_register and
@@ -67,6 +69,8 @@ struct media_file_operations {
* before registering the node.
*/
struct media_devnode {
+ struct media_device *media_dev;
+
/* device ops */
const struct media_file_operations *fops;
@@ -80,24 +84,42 @@ struct media_devnode {
unsigned long flags; /* Use bitops to access flags */
/* callbacks */
- void (*release)(struct media_devnode *mdev);
+ void (*release)(struct media_devnode *devnode);
};
/* dev to media_devnode */
#define to_media_devnode(cd) container_of(cd, struct media_devnode, dev)
-int __must_check media_devnode_register(struct media_devnode *mdev,
+int __must_check media_devnode_register(struct media_device *mdev,
+ struct media_devnode *devnode,
struct module *owner);
-void media_devnode_unregister(struct media_devnode *mdev);
+
+/**
+ * media_devnode_unregister_prepare - clear the media device node register bit
+ * @devnode: the device node to prepare for unregister
+ *
+ * This clears the passed device register bit. Future open calls will be met
+ * with errors. Should be called before media_devnode_unregister() to avoid
+ * races with unregister and device file open calls.
+ *
+ * This function can safely be called if the device node has never been
+ * registered or has already been unregistered.
+ */
+void media_devnode_unregister_prepare(struct media_devnode *devnode);
+
+void media_devnode_unregister(struct media_devnode *devnode);
static inline struct media_devnode *media_devnode_data(struct file *filp)
{
return filp->private_data;
}
-static inline int media_devnode_is_registered(struct media_devnode *mdev)
+static inline int media_devnode_is_registered(struct media_devnode *devnode)
{
- return test_bit(MEDIA_FLAG_REGISTERED, &mdev->flags);
+ if (!devnode)
+ return false;
+
+ return test_bit(MEDIA_FLAG_REGISTERED, &devnode->flags);
}
#endif /* _MEDIA_DEVNODE_H */
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 6258264a0bf7..94880f07bc06 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -915,6 +915,8 @@ int compat_ipv6_setsockopt(struct sock *sk, int level, int optname,
int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen);
+int __ip6_datagram_connect(struct sock *sk, struct sockaddr *addr,
+ int addr_len);
int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len);
int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *addr,
int addr_len);
diff --git a/include/uapi/linux/if_pppol2tp.h b/include/uapi/linux/if_pppol2tp.h
index 163e8adac2d6..de246e9f4974 100644
--- a/include/uapi/linux/if_pppol2tp.h
+++ b/include/uapi/linux/if_pppol2tp.h
@@ -17,6 +17,7 @@
#include <linux/types.h>
+#include <linux/l2tp.h>
/* Structure used to connect() the socket to a particular tunnel UDP
* socket over IPv4.
@@ -89,14 +90,12 @@ enum {
PPPOL2TP_SO_REORDERTO = 5,
};
-/* Debug message categories for the DEBUG socket option */
+/* Debug message categories for the DEBUG socket option (deprecated) */
enum {
- PPPOL2TP_MSG_DEBUG = (1 << 0), /* verbose debug (if
- * compiled in) */
- PPPOL2TP_MSG_CONTROL = (1 << 1), /* userspace - kernel
- * interface */
- PPPOL2TP_MSG_SEQ = (1 << 2), /* sequence numbers */
- PPPOL2TP_MSG_DATA = (1 << 3), /* data packets */
+ PPPOL2TP_MSG_DEBUG = L2TP_MSG_DEBUG,
+ PPPOL2TP_MSG_CONTROL = L2TP_MSG_CONTROL,
+ PPPOL2TP_MSG_SEQ = L2TP_MSG_SEQ,
+ PPPOL2TP_MSG_DATA = L2TP_MSG_DATA,
};
diff --git a/include/uapi/linux/l2tp.h b/include/uapi/linux/l2tp.h
index 347ef22a964e..dedfb2b1832a 100644
--- a/include/uapi/linux/l2tp.h
+++ b/include/uapi/linux/l2tp.h
@@ -108,7 +108,7 @@ enum {
L2TP_ATTR_VLAN_ID, /* u16 */
L2TP_ATTR_COOKIE, /* 0, 4 or 8 bytes */
L2TP_ATTR_PEER_COOKIE, /* 0, 4 or 8 bytes */
- L2TP_ATTR_DEBUG, /* u32 */
+ L2TP_ATTR_DEBUG, /* u32, enum l2tp_debug_flags */
L2TP_ATTR_RECV_SEQ, /* u8 */
L2TP_ATTR_SEND_SEQ, /* u8 */
L2TP_ATTR_LNS_MODE, /* u8 */
@@ -173,6 +173,21 @@ enum l2tp_seqmode {
L2TP_SEQ_ALL = 2,
};
+/**
+ * enum l2tp_debug_flags - debug message categories for L2TP tunnels/sessions
+ *
+ * @L2TP_MSG_DEBUG: verbose debug (if compiled in)
+ * @L2TP_MSG_CONTROL: userspace - kernel interface
+ * @L2TP_MSG_SEQ: sequence numbers
+ * @L2TP_MSG_DATA: data packets
+ */
+enum l2tp_debug_flags {
+ L2TP_MSG_DEBUG = (1 << 0),
+ L2TP_MSG_CONTROL = (1 << 1),
+ L2TP_MSG_SEQ = (1 << 2),
+ L2TP_MSG_DATA = (1 << 3),
+};
+
/*
* NETLINK_GENERIC related info
*/
diff --git a/kernel/padata.c b/kernel/padata.c
index ae036af3f012..c50975f43b34 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -65,15 +65,11 @@ static int padata_cpu_hash(struct parallel_data *pd)
static void padata_parallel_worker(struct work_struct *parallel_work)
{
struct padata_parallel_queue *pqueue;
- struct parallel_data *pd;
- struct padata_instance *pinst;
LIST_HEAD(local_list);
local_bh_disable();
pqueue = container_of(parallel_work,
struct padata_parallel_queue, work);
- pd = pqueue->pd;
- pinst = pd->pinst;
spin_lock(&pqueue->parallel.lock);
list_replace_init(&pqueue->parallel.list, &local_list);
@@ -136,6 +132,7 @@ int padata_do_parallel(struct padata_instance *pinst,
padata->cb_cpu = cb_cpu;
target_cpu = padata_cpu_hash(pd);
+ padata->cpu = target_cpu;
queue = per_cpu_ptr(pd->pqueue, target_cpu);
spin_lock(&queue->parallel.lock);
@@ -159,8 +156,6 @@ EXPORT_SYMBOL(padata_do_parallel);
* A pointer to the control struct of the next object that needs
* serialization, if present in one of the percpu reorder queues.
*
- * NULL, if all percpu reorder queues are empty.
- *
* -EINPROGRESS, if the next object that needs serialization will
* be parallel processed by another cpu and is not yet present in
* the cpu's reorder queue.
@@ -170,25 +165,12 @@ EXPORT_SYMBOL(padata_do_parallel);
*/
static struct padata_priv *padata_get_next(struct parallel_data *pd)
{
- int cpu, num_cpus;
- unsigned int next_nr, next_index;
struct padata_parallel_queue *next_queue;
struct padata_priv *padata;
struct padata_list *reorder;
+ int cpu = pd->cpu;
- num_cpus = cpumask_weight(pd->cpumask.pcpu);
-
- /*
- * Calculate the percpu reorder queue and the sequence
- * number of the next object.
- */
- next_nr = pd->processed;
- next_index = next_nr % num_cpus;
- cpu = padata_index_to_cpu(pd, next_index);
next_queue = per_cpu_ptr(pd->pqueue, cpu);
-
- padata = NULL;
-
reorder = &next_queue->reorder;
spin_lock(&reorder->lock);
@@ -199,7 +181,8 @@ static struct padata_priv *padata_get_next(struct parallel_data *pd)
list_del_init(&padata->list);
atomic_dec(&pd->reorder_objects);
- pd->processed++;
+ pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1,
+ false);
spin_unlock(&reorder->lock);
goto out;
@@ -222,6 +205,7 @@ static void padata_reorder(struct parallel_data *pd)
struct padata_priv *padata;
struct padata_serial_queue *squeue;
struct padata_instance *pinst = pd->pinst;
+ struct padata_parallel_queue *next_queue;
/*
* We need to ensure that only one cpu can work on dequeueing of
@@ -240,12 +224,11 @@ static void padata_reorder(struct parallel_data *pd)
padata = padata_get_next(pd);
/*
- * All reorder queues are empty, or the next object that needs
- * serialization is parallel processed by another cpu and is
- * still on it's way to the cpu's reorder queue, nothing to
- * do for now.
+ * If the next object that needs serialization is parallel
+ * processed by another cpu and is still on its way to the
+ * cpu's reorder queue, nothing to do for now.
*/
- if (!padata || PTR_ERR(padata) == -EINPROGRESS)
+ if (PTR_ERR(padata) == -EINPROGRESS)
break;
/*
@@ -254,7 +237,6 @@ static void padata_reorder(struct parallel_data *pd)
* so exit immediately.
*/
if (PTR_ERR(padata) == -ENODATA) {
- del_timer(&pd->timer);
spin_unlock_bh(&pd->lock);
return;
}
@@ -273,28 +255,27 @@ static void padata_reorder(struct parallel_data *pd)
/*
* The next object that needs serialization might have arrived to
- * the reorder queues in the meantime, we will be called again
- * from the timer function if no one else cares for it.
+ * the reorder queues in the meantime.
*
- * Ensure reorder_objects is read after pd->lock is dropped so we see
- * an increment from another task in padata_do_serial. Pairs with
+ * Ensure reorder queue is read after pd->lock is dropped so we see
+ * new objects from another task in padata_do_serial. Pairs with
* smp_mb__after_atomic in padata_do_serial.
*/
smp_mb();
- if (atomic_read(&pd->reorder_objects)
- && !(pinst->flags & PADATA_RESET))
- mod_timer(&pd->timer, jiffies + HZ);
- else
- del_timer(&pd->timer);
- return;
+ next_queue = per_cpu_ptr(pd->pqueue, pd->cpu);
+ if (!list_empty(&next_queue->reorder.list))
+ queue_work(pinst->wq, &pd->reorder_work);
}
-static void padata_reorder_timer(unsigned long arg)
+static void invoke_padata_reorder(struct work_struct *work)
{
- struct parallel_data *pd = (struct parallel_data *)arg;
+ struct parallel_data *pd;
+ local_bh_disable();
+ pd = container_of(work, struct parallel_data, reorder_work);
padata_reorder(pd);
+ local_bh_enable();
}
static void padata_serial_worker(struct work_struct *serial_work)
@@ -341,29 +322,22 @@ static void padata_serial_worker(struct work_struct *serial_work)
*/
void padata_do_serial(struct padata_priv *padata)
{
- int cpu;
- struct padata_parallel_queue *pqueue;
- struct parallel_data *pd;
-
- pd = padata->pd;
-
- cpu = get_cpu();
- pqueue = per_cpu_ptr(pd->pqueue, cpu);
+ struct parallel_data *pd = padata->pd;
+ struct padata_parallel_queue *pqueue = per_cpu_ptr(pd->pqueue,
+ padata->cpu);
spin_lock(&pqueue->reorder.lock);
- atomic_inc(&pd->reorder_objects);
list_add_tail(&padata->list, &pqueue->reorder.list);
+ atomic_inc(&pd->reorder_objects);
spin_unlock(&pqueue->reorder.lock);
/*
- * Ensure the atomic_inc of reorder_objects above is ordered correctly
+ * Ensure the addition to the reorder list is ordered correctly
* with the trylock of pd->lock in padata_reorder. Pairs with smp_mb
* in padata_reorder.
*/
smp_mb__after_atomic();
- put_cpu();
-
padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);
@@ -412,9 +386,14 @@ static void padata_init_pqueues(struct parallel_data *pd)
struct padata_parallel_queue *pqueue;
cpu_index = 0;
- for_each_cpu(cpu, pd->cpumask.pcpu) {
+ for_each_possible_cpu(cpu) {
pqueue = per_cpu_ptr(pd->pqueue, cpu);
- pqueue->pd = pd;
+
+ if (!cpumask_test_cpu(cpu, pd->cpumask.pcpu)) {
+ pqueue->cpu_index = -1;
+ continue;
+ }
+
pqueue->cpu_index = cpu_index;
cpu_index++;
@@ -448,12 +427,13 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
padata_init_pqueues(pd);
padata_init_squeues(pd);
- setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
atomic_set(&pd->seq_nr, -1);
atomic_set(&pd->reorder_objects, 0);
atomic_set(&pd->refcnt, 1);
pd->pinst = pinst;
spin_lock_init(&pd->lock);
+ pd->cpu = cpumask_first(pd->cpumask.pcpu);
+ INIT_WORK(&pd->reorder_work, invoke_padata_reorder);
return pd;
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 5a70f6196f57..24f06e7abf92 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -42,6 +42,38 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
return i;
}
+/**
+ * cpumask_next_wrap - helper to implement for_each_cpu_wrap
+ * @n: the cpu prior to the place to search
+ * @mask: the cpumask pointer
+ * @start: the start point of the iteration
+ * @wrap: assume @n crossing @start terminates the iteration
+ *
+ * Returns >= nr_cpu_ids on completion
+ *
+ * Note: the @wrap argument is required for the start condition when
+ * we cannot assume @start is set in @mask.
+ */
+int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
+{
+ int next;
+
+again:
+ next = cpumask_next(n, mask);
+
+ if (wrap && n < start && next >= start) {
+ return nr_cpumask_bits;
+
+ } else if (next >= nr_cpumask_bits) {
+ wrap = true;
+ n = -1;
+ goto again;
+ }
+
+ return next;
+}
+EXPORT_SYMBOL(cpumask_next_wrap);
+
/* These are not inline because of header tangles. */
#ifdef CONFIG_CPUMASK_OFFSTACK
/**
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index f33154365b64..389b6367a810 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -40,7 +40,8 @@ static bool ipv6_mapped_addr_any(const struct in6_addr *a)
return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0);
}
-static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr,
+ int addr_len)
{
struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
struct inet_sock *inet = inet_sk(sk);
@@ -213,6 +214,7 @@ out:
fl6_sock_release(flowlabel);
return err;
}
+EXPORT_SYMBOL_GPL(__ip6_datagram_connect);
int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 8cbccddc0b1e..0233c496fc51 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -112,53 +112,19 @@ struct l2tp_net {
spinlock_t l2tp_session_hlist_lock;
};
-static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk)
{
return sk->sk_user_data;
}
-static inline struct l2tp_net *l2tp_pernet(struct net *net)
+static inline struct l2tp_net *l2tp_pernet(const struct net *net)
{
BUG_ON(!net);
return net_generic(net, l2tp_net_id);
}
-/* Tunnel reference counts. Incremented per session that is added to
- * the tunnel.
- */
-static inline void l2tp_tunnel_inc_refcount_1(struct l2tp_tunnel *tunnel)
-{
- atomic_inc(&tunnel->ref_count);
-}
-
-static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel)
-{
- if (atomic_dec_and_test(&tunnel->ref_count))
- l2tp_tunnel_free(tunnel);
-}
-#ifdef L2TP_REFCNT_DEBUG
-#define l2tp_tunnel_inc_refcount(_t) \
-do { \
- pr_debug("l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n", \
- __func__, __LINE__, (_t)->name, \
- atomic_read(&_t->ref_count)); \
- l2tp_tunnel_inc_refcount_1(_t); \
-} while (0)
-#define l2tp_tunnel_dec_refcount(_t) \
-do { \
- pr_debug("l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", \
- __func__, __LINE__, (_t)->name, \
- atomic_read(&_t->ref_count)); \
- l2tp_tunnel_dec_refcount_1(_t); \
-} while (0)
-#else
-#define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t)
-#define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t)
-#endif
-
/* Session hash global list for L2TPv3.
* The session_id SHOULD be random according to RFC3931, but several
* L2TP implementations use incrementing session_ids. So we do a real
@@ -216,27 +182,6 @@ static void l2tp_tunnel_sock_put(struct sock *sk)
sock_put(sk);
}
-/* Lookup a session by id in the global session list
- */
-static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id)
-{
- struct l2tp_net *pn = l2tp_pernet(net);
- struct hlist_head *session_list =
- l2tp_session_id_hash_2(pn, session_id);
- struct l2tp_session *session;
-
- rcu_read_lock_bh();
- hlist_for_each_entry_rcu(session, session_list, global_hlist) {
- if (session->session_id == session_id) {
- rcu_read_unlock_bh();
- return session;
- }
- }
- rcu_read_unlock_bh();
-
- return NULL;
-}
-
/* Session hash list.
* The session_id SHOULD be random according to RFC2661, but several
* L2TP implementations (Cisco and Microsoft) use incrementing
@@ -249,38 +194,31 @@ l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id)
return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)];
}
-/* Lookup a session by id
- */
-struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id)
+/* Lookup a tunnel. A new reference is held on the returned tunnel. */
+struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
{
- struct hlist_head *session_list;
- struct l2tp_session *session;
+ const struct l2tp_net *pn = l2tp_pernet(net);
+ struct l2tp_tunnel *tunnel;
- /* In L2TPv3, session_ids are unique over all tunnels and we
- * sometimes need to look them up before we know the
- * tunnel.
- */
- if (tunnel == NULL)
- return l2tp_session_find_2(net, session_id);
+ rcu_read_lock_bh();
+ list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
+ if (tunnel->tunnel_id == tunnel_id) {
+ l2tp_tunnel_inc_refcount(tunnel);
+ rcu_read_unlock_bh();
- session_list = l2tp_session_id_hash(tunnel, session_id);
- read_lock_bh(&tunnel->hlist_lock);
- hlist_for_each_entry(session, session_list, hlist) {
- if (session->session_id == session_id) {
- read_unlock_bh(&tunnel->hlist_lock);
- return session;
+ return tunnel;
}
}
- read_unlock_bh(&tunnel->hlist_lock);
+ rcu_read_unlock_bh();
return NULL;
}
-EXPORT_SYMBOL_GPL(l2tp_session_find);
+EXPORT_SYMBOL_GPL(l2tp_tunnel_get);
-/* Like l2tp_session_find() but takes a reference on the returned session.
+/* Lookup a session. A new reference is held on the returned session.
* Optionally calls session->ref() too if do_ref is true.
*/
-struct l2tp_session *l2tp_session_get(struct net *net,
+struct l2tp_session *l2tp_session_get(const struct net *net,
struct l2tp_tunnel *tunnel,
u32 session_id, bool do_ref)
{
@@ -355,7 +293,9 @@ EXPORT_SYMBOL_GPL(l2tp_session_get_nth);
/* Lookup a session by interface name.
* This is very inefficient but is only used by management interfaces.
*/
-struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
+struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
+ const char *ifname,
+ bool do_ref)
{
struct l2tp_net *pn = l2tp_pernet(net);
int hash;
@@ -365,7 +305,11 @@ struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) {
hlist_for_each_entry_rcu(session, &pn->l2tp_session_hlist[hash], global_hlist) {
if (!strcmp(session->ifname, ifname)) {
+ l2tp_session_inc_refcount(session);
+ if (do_ref && session->ref)
+ session->ref(session);
rcu_read_unlock_bh();
+
return session;
}
}
@@ -375,22 +319,30 @@ struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
return NULL;
}
-EXPORT_SYMBOL_GPL(l2tp_session_find_by_ifname);
+EXPORT_SYMBOL_GPL(l2tp_session_get_by_ifname);
-static int l2tp_session_add_to_tunnel(struct l2tp_tunnel *tunnel,
- struct l2tp_session *session)
+int l2tp_session_register(struct l2tp_session *session,
+ struct l2tp_tunnel *tunnel)
{
struct l2tp_session *session_walk;
struct hlist_head *g_head;
struct hlist_head *head;
struct l2tp_net *pn;
+ int err;
head = l2tp_session_id_hash(tunnel, session->session_id);
write_lock_bh(&tunnel->hlist_lock);
+ if (!tunnel->acpt_newsess) {
+ err = -ENODEV;
+ goto err_tlock;
+ }
+
hlist_for_each_entry(session_walk, head, hlist)
- if (session_walk->session_id == session->session_id)
- goto exist;
+ if (session_walk->session_id == session->session_id) {
+ err = -EEXIST;
+ goto err_tlock;
+ }
if (tunnel->version == L2TP_HDR_VER_3) {
pn = l2tp_pernet(tunnel->l2tp_net);
@@ -398,30 +350,44 @@ static int l2tp_session_add_to_tunnel(struct l2tp_tunnel *tunnel,
session->session_id);
spin_lock_bh(&pn->l2tp_session_hlist_lock);
+
hlist_for_each_entry(session_walk, g_head, global_hlist)
- if (session_walk->session_id == session->session_id)
- goto exist_glob;
+ if (session_walk->session_id == session->session_id) {
+ err = -EEXIST;
+ goto err_tlock_pnlock;
+ }
+ l2tp_tunnel_inc_refcount(tunnel);
+ sock_hold(tunnel->sock);
hlist_add_head_rcu(&session->global_hlist, g_head);
+
spin_unlock_bh(&pn->l2tp_session_hlist_lock);
+ } else {
+ l2tp_tunnel_inc_refcount(tunnel);
+ sock_hold(tunnel->sock);
}
hlist_add_head(&session->hlist, head);
write_unlock_bh(&tunnel->hlist_lock);
+ /* Ignore management session in session count value */
+ if (session->session_id != 0)
+ atomic_inc(&l2tp_session_count);
+
return 0;
-exist_glob:
+err_tlock_pnlock:
spin_unlock_bh(&pn->l2tp_session_hlist_lock);
-exist:
+err_tlock:
write_unlock_bh(&tunnel->hlist_lock);
- return -EEXIST;
+ return err;
}
+EXPORT_SYMBOL_GPL(l2tp_session_register);
/* Lookup a tunnel by id
*/
-struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id)
+struct l2tp_tunnel *l2tp_tunnel_find(const struct net *net, u32 tunnel_id)
{
struct l2tp_tunnel *tunnel;
struct l2tp_net *pn = l2tp_pernet(net);
@@ -439,7 +405,7 @@ struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id)
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_find);
-struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth)
+struct l2tp_tunnel *l2tp_tunnel_find_nth(const struct net *net, int nth)
{
struct l2tp_net *pn = l2tp_pernet(net);
struct l2tp_tunnel *tunnel;
@@ -1307,7 +1273,6 @@ static void l2tp_tunnel_destruct(struct sock *sk)
/* Remove hooks into tunnel socket */
sk->sk_destruct = tunnel->old_sk_destruct;
sk->sk_user_data = NULL;
- tunnel->sock = NULL;
/* Remove the tunnel struct from the tunnel list */
pn = l2tp_pernet(tunnel->l2tp_net);
@@ -1317,6 +1282,8 @@ static void l2tp_tunnel_destruct(struct sock *sk)
atomic_dec(&l2tp_tunnel_count);
l2tp_tunnel_closeall(tunnel);
+
+ tunnel->sock = NULL;
l2tp_tunnel_dec_refcount(tunnel);
/* Call the original destructor */
@@ -1341,6 +1308,7 @@ void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
tunnel->name);
write_lock_bh(&tunnel->hlist_lock);
+ tunnel->acpt_newsess = false;
for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
again:
hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
@@ -1394,17 +1362,6 @@ static void l2tp_udp_encap_destroy(struct sock *sk)
}
}
-/* Really kill the tunnel.
- * Come here only when all sessions have been cleared from the tunnel.
- */
-static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
-{
- BUG_ON(atomic_read(&tunnel->ref_count) != 0);
- BUG_ON(tunnel->sock != NULL);
- l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: free...\n", tunnel->name);
- kfree_rcu(tunnel, rcu);
-}
-
/* Workqueue tunnel deletion function */
static void l2tp_tunnel_del_work(struct work_struct *work)
{
@@ -1655,6 +1612,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
tunnel->magic = L2TP_TUNNEL_MAGIC;
sprintf(&tunnel->name[0], "tunl %u", tunnel_id);
rwlock_init(&tunnel->hlist_lock);
+ tunnel->acpt_newsess = true;
/* The net we belong to */
tunnel->l2tp_net = net;
@@ -1840,7 +1798,6 @@ EXPORT_SYMBOL_GPL(l2tp_session_set_header_len);
struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
{
struct l2tp_session *session;
- int err;
session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL);
if (session != NULL) {
@@ -1896,25 +1853,7 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
l2tp_session_set_header_len(session, tunnel->version);
- err = l2tp_session_add_to_tunnel(tunnel, session);
- if (err) {
- kfree(session);
-
- return ERR_PTR(err);
- }
-
- /* Bump the reference count. The session context is deleted
- * only when this drops to zero.
- */
l2tp_session_inc_refcount(session);
- l2tp_tunnel_inc_refcount(tunnel);
-
- /* Ensure tunnel socket isn't deleted */
- sock_hold(tunnel->sock);
-
- /* Ignore management session in session count value */
- if (session->session_id != 0)
- atomic_inc(&l2tp_session_count);
return session;
}
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 06323a12d62c..57da0f1d62dd 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -23,16 +23,6 @@
#define L2TP_HASH_BITS_2 8
#define L2TP_HASH_SIZE_2 (1 << L2TP_HASH_BITS_2)
-/* Debug message categories for the DEBUG socket option */
-enum {
- L2TP_MSG_DEBUG = (1 << 0), /* verbose debug (if
- * compiled in) */
- L2TP_MSG_CONTROL = (1 << 1), /* userspace - kernel
- * interface */
- L2TP_MSG_SEQ = (1 << 2), /* sequence numbers */
- L2TP_MSG_DATA = (1 << 3), /* data packets */
-};
-
struct sk_buff;
struct l2tp_stats {
@@ -175,6 +165,10 @@ struct l2tp_tunnel {
struct rcu_head rcu;
rwlock_t hlist_lock; /* protect session_hlist */
+ bool acpt_newsess; /* Indicates whether this
+ * tunnel accepts new sessions.
+ * Protected by hlist_lock.
+ */
struct hlist_head session_hlist[L2TP_HASH_SIZE];
/* hashed list of sessions,
* hashed by id */
@@ -210,7 +204,9 @@ struct l2tp_tunnel {
};
struct l2tp_nl_cmd_ops {
- int (*session_create)(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg);
+ int (*session_create)(struct net *net, struct l2tp_tunnel *tunnel,
+ u32 session_id, u32 peer_session_id,
+ struct l2tp_session_cfg *cfg);
int (*session_delete)(struct l2tp_session *session);
};
@@ -244,17 +240,18 @@ out:
return tunnel;
}
-struct l2tp_session *l2tp_session_get(struct net *net,
+struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id);
+
+struct l2tp_session *l2tp_session_get(const struct net *net,
struct l2tp_tunnel *tunnel,
u32 session_id, bool do_ref);
-struct l2tp_session *l2tp_session_find(struct net *net,
- struct l2tp_tunnel *tunnel,
- u32 session_id);
struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
bool do_ref);
-struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname);
-struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id);
-struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth);
+struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
+ const char *ifname,
+ bool do_ref);
+struct l2tp_tunnel *l2tp_tunnel_find(const struct net *net, u32 tunnel_id);
+struct l2tp_tunnel *l2tp_tunnel_find_nth(const struct net *net, int nth);
int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id,
u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg,
@@ -265,6 +262,9 @@ struct l2tp_session *l2tp_session_create(int priv_size,
struct l2tp_tunnel *tunnel,
u32 session_id, u32 peer_session_id,
struct l2tp_session_cfg *cfg);
+int l2tp_session_register(struct l2tp_session *session,
+ struct l2tp_tunnel *tunnel);
+
void __l2tp_session_unhash(struct l2tp_session *session);
int l2tp_session_delete(struct l2tp_session *session);
void l2tp_session_free(struct l2tp_session *session);
@@ -283,6 +283,17 @@ int l2tp_nl_register_ops(enum l2tp_pwtype pw_type,
void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type);
int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg);
+static inline void l2tp_tunnel_inc_refcount(struct l2tp_tunnel *tunnel)
+{
+ atomic_inc(&tunnel->ref_count);
+}
+
+static inline void l2tp_tunnel_dec_refcount(struct l2tp_tunnel *tunnel)
+{
+ if (atomic_dec_and_test(&tunnel->ref_count))
+ kfree_rcu(tunnel, rcu);
+}
+
/* Session reference counts. Incremented when code obtains a reference
* to a session.
*/
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index c94160df71af..e0a65ee1e830 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -30,6 +30,9 @@
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/udp.h>
#include "l2tp_core.h"
@@ -41,7 +44,6 @@ struct l2tp_eth {
struct net_device *dev;
struct sock *tunnel_sock;
struct l2tp_session *session;
- struct list_head list;
atomic_long_t tx_bytes;
atomic_long_t tx_packets;
atomic_long_t tx_dropped;
@@ -52,20 +54,9 @@ struct l2tp_eth {
/* via l2tp_session_priv() */
struct l2tp_eth_sess {
- struct net_device *dev;
+ struct net_device __rcu *dev;
};
-/* per-net private data for this module */
-static unsigned int l2tp_eth_net_id;
-struct l2tp_eth_net {
- struct list_head l2tp_eth_dev_list;
- spinlock_t l2tp_eth_lock;
-};
-
-static inline struct l2tp_eth_net *l2tp_eth_pernet(struct net *net)
-{
- return net_generic(net, l2tp_eth_net_id);
-}
static struct lock_class_key l2tp_eth_tx_busylock;
static int l2tp_eth_dev_init(struct net_device *dev)
@@ -82,12 +73,13 @@ static int l2tp_eth_dev_init(struct net_device *dev)
static void l2tp_eth_dev_uninit(struct net_device *dev)
{
struct l2tp_eth *priv = netdev_priv(dev);
- struct l2tp_eth_net *pn = l2tp_eth_pernet(dev_net(dev));
+ struct l2tp_eth_sess *spriv;
- spin_lock(&pn->l2tp_eth_lock);
- list_del_init(&priv->list);
- spin_unlock(&pn->l2tp_eth_lock);
- dev_put(dev);
+ spriv = l2tp_session_priv(priv->session);
+ RCU_INIT_POINTER(spriv->dev, NULL);
+ /* No need for synchronize_net() here. We're called by
+ * unregister_netdev*(), which does the synchronisation for us.
+ */
}
static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -141,8 +133,8 @@ static void l2tp_eth_dev_setup(struct net_device *dev)
static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len)
{
struct l2tp_eth_sess *spriv = l2tp_session_priv(session);
- struct net_device *dev = spriv->dev;
- struct l2tp_eth *priv = netdev_priv(dev);
+ struct net_device *dev;
+ struct l2tp_eth *priv;
if (session->debug & L2TP_MSG_DATA) {
unsigned int length;
@@ -166,16 +158,25 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
skb_dst_drop(skb);
nf_reset(skb);
+ rcu_read_lock();
+ dev = rcu_dereference(spriv->dev);
+ if (!dev)
+ goto error_rcu;
+
+ priv = netdev_priv(dev);
if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
atomic_long_inc(&priv->rx_packets);
atomic_long_add(data_len, &priv->rx_bytes);
} else {
atomic_long_inc(&priv->rx_errors);
}
+ rcu_read_unlock();
+
return;
+error_rcu:
+ rcu_read_unlock();
error:
- atomic_long_inc(&priv->rx_errors);
kfree_skb(skb);
}
@@ -186,11 +187,15 @@ static void l2tp_eth_delete(struct l2tp_session *session)
if (session) {
spriv = l2tp_session_priv(session);
- dev = spriv->dev;
+
+ rtnl_lock();
+ dev = rtnl_dereference(spriv->dev);
if (dev) {
- unregister_netdev(dev);
- spriv->dev = NULL;
+ unregister_netdevice(dev);
+ rtnl_unlock();
module_put(THIS_MODULE);
+ } else {
+ rtnl_unlock();
}
}
}
@@ -200,35 +205,89 @@ static void l2tp_eth_show(struct seq_file *m, void *arg)
{
struct l2tp_session *session = arg;
struct l2tp_eth_sess *spriv = l2tp_session_priv(session);
- struct net_device *dev = spriv->dev;
+ struct net_device *dev;
+
+ rcu_read_lock();
+ dev = rcu_dereference(spriv->dev);
+ if (!dev) {
+ rcu_read_unlock();
+ return;
+ }
+ dev_hold(dev);
+ rcu_read_unlock();
seq_printf(m, " interface %s\n", dev->name);
+
+ dev_put(dev);
}
#endif
-static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
+static void l2tp_eth_adjust_mtu(struct l2tp_tunnel *tunnel,
+ struct l2tp_session *session,
+ struct net_device *dev)
+{
+ unsigned int overhead = 0;
+ struct dst_entry *dst;
+ u32 l3_overhead = 0;
+
+ /* if the encap is UDP, account for UDP header size */
+ if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
+ overhead += sizeof(struct udphdr);
+ dev->needed_headroom += sizeof(struct udphdr);
+ }
+ if (session->mtu != 0) {
+ dev->mtu = session->mtu;
+ dev->needed_headroom += session->hdr_len;
+ return;
+ }
+ lock_sock(tunnel->sock);
+ l3_overhead = kernel_sock_ip_overhead(tunnel->sock);
+ release_sock(tunnel->sock);
+ if (l3_overhead == 0) {
+ /* L3 Overhead couldn't be identified, this could be
+ * because tunnel->sock was NULL or the socket's
+ * address family was not IPv4 or IPv6,
+ * dev mtu stays at 1500.
+ */
+ return;
+ }
+ /* Adjust MTU, factor overhead - underlay L3, overlay L2 hdr
+ * UDP overhead, if any, was already factored in above.
+ */
+ overhead += session->hdr_len + ETH_HLEN + l3_overhead;
+
+ /* If PMTU discovery was enabled, use discovered MTU on L2TP device */
+ dst = sk_dst_get(tunnel->sock);
+ if (dst) {
+ /* dst_mtu will use PMTU if found, else fallback to intf MTU */
+ u32 pmtu = dst_mtu(dst);
+
+ if (pmtu != 0)
+ dev->mtu = pmtu;
+ dst_release(dst);
+ }
+ session->mtu = dev->mtu - overhead;
+ dev->mtu = session->mtu;
+ dev->needed_headroom += session->hdr_len;
+}
+
+static int l2tp_eth_create(struct net *net, struct l2tp_tunnel *tunnel,
+ u32 session_id, u32 peer_session_id,
+ struct l2tp_session_cfg *cfg)
{
struct net_device *dev;
char name[IFNAMSIZ];
- struct l2tp_tunnel *tunnel;
struct l2tp_session *session;
struct l2tp_eth *priv;
struct l2tp_eth_sess *spriv;
int rc;
- struct l2tp_eth_net *pn;
-
- tunnel = l2tp_tunnel_find(net, tunnel_id);
- if (!tunnel) {
- rc = -ENODEV;
- goto out;
- }
if (cfg->ifname) {
dev = dev_get_by_name(net, cfg->ifname);
if (dev) {
dev_put(dev);
rc = -EEXIST;
- goto out;
+ goto err;
}
strlcpy(name, cfg->ifname, IFNAMSIZ);
} else
@@ -238,26 +297,22 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
peer_session_id, cfg);
if (IS_ERR(session)) {
rc = PTR_ERR(session);
- goto out;
+ goto err;
}
dev = alloc_netdev(sizeof(*priv), name, NET_NAME_UNKNOWN,
l2tp_eth_dev_setup);
if (!dev) {
rc = -ENOMEM;
- goto out_del_session;
+ goto err_sess;
}
dev_net_set(dev, net);
- if (session->mtu == 0)
- session->mtu = dev->mtu - session->hdr_len;
- dev->mtu = session->mtu;
- dev->needed_headroom += session->hdr_len;
+ l2tp_eth_adjust_mtu(tunnel, session, dev);
priv = netdev_priv(dev);
priv->dev = dev;
priv->session = session;
- INIT_LIST_HEAD(&priv->list);
priv->tunnel_sock = tunnel->sock;
session->recv_skb = l2tp_eth_dev_recv;
@@ -267,48 +322,50 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
#endif
spriv = l2tp_session_priv(session);
- spriv->dev = dev;
- rc = register_netdev(dev);
- if (rc < 0)
- goto out_del_dev;
+ l2tp_session_inc_refcount(session);
- __module_get(THIS_MODULE);
- /* Must be done after register_netdev() */
- strlcpy(session->ifname, dev->name, IFNAMSIZ);
+ rtnl_lock();
- dev_hold(dev);
- pn = l2tp_eth_pernet(dev_net(dev));
- spin_lock(&pn->l2tp_eth_lock);
- list_add(&priv->list, &pn->l2tp_eth_dev_list);
- spin_unlock(&pn->l2tp_eth_lock);
+ /* Register both device and session while holding the rtnl lock. This
+ * ensures that l2tp_eth_delete() will see that there's a device to
+ * unregister, even if it happened to run before we assign spriv->dev.
+ */
+ rc = l2tp_session_register(session, tunnel);
+ if (rc < 0) {
+ rtnl_unlock();
+ goto err_sess_dev;
+ }
- return 0;
+ rc = register_netdevice(dev);
+ if (rc < 0) {
+ rtnl_unlock();
+ l2tp_session_delete(session);
+ l2tp_session_dec_refcount(session);
+ free_netdev(dev);
-out_del_dev:
- free_netdev(dev);
- spriv->dev = NULL;
-out_del_session:
- l2tp_session_delete(session);
-out:
- return rc;
-}
+ return rc;
+ }
-static __net_init int l2tp_eth_init_net(struct net *net)
-{
- struct l2tp_eth_net *pn = net_generic(net, l2tp_eth_net_id);
+ strlcpy(session->ifname, dev->name, IFNAMSIZ);
+ rcu_assign_pointer(spriv->dev, dev);
+
+ rtnl_unlock();
- INIT_LIST_HEAD(&pn->l2tp_eth_dev_list);
- spin_lock_init(&pn->l2tp_eth_lock);
+ l2tp_session_dec_refcount(session);
+
+ __module_get(THIS_MODULE);
return 0;
-}
-static struct pernet_operations l2tp_eth_net_ops = {
- .init = l2tp_eth_init_net,
- .id = &l2tp_eth_net_id,
- .size = sizeof(struct l2tp_eth_net),
-};
+err_sess_dev:
+ l2tp_session_dec_refcount(session);
+ free_netdev(dev);
+err_sess:
+ kfree(session);
+err:
+ return rc;
+}
static const struct l2tp_nl_cmd_ops l2tp_eth_nl_cmd_ops = {
@@ -323,25 +380,18 @@ static int __init l2tp_eth_init(void)
err = l2tp_nl_register_ops(L2TP_PWTYPE_ETH, &l2tp_eth_nl_cmd_ops);
if (err)
- goto out;
-
- err = register_pernet_device(&l2tp_eth_net_ops);
- if (err)
- goto out_unreg;
+ goto err;
pr_info("L2TP ethernet pseudowire support (L2TPv3)\n");
return 0;
-out_unreg:
- l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH);
-out:
+err:
return err;
}
static void __exit l2tp_eth_exit(void)
{
- unregister_pernet_device(&l2tp_eth_net_ops);
l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH);
}
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 58f87bdd12c7..fd7363f8405a 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -122,6 +122,7 @@ static int l2tp_ip_recv(struct sk_buff *skb)
unsigned char *ptr, *optr;
struct l2tp_session *session;
struct l2tp_tunnel *tunnel = NULL;
+ struct iphdr *iph;
int length;
if (!pskb_may_pull(skb, 4))
@@ -180,23 +181,16 @@ pass_up:
goto discard;
tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
- tunnel = l2tp_tunnel_find(net, tunnel_id);
- if (tunnel) {
- sk = tunnel->sock;
- sock_hold(sk);
- } else {
- struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
-
- read_lock_bh(&l2tp_ip_lock);
- sk = __l2tp_ip_bind_lookup(net, iph->daddr, 0, tunnel_id);
- if (!sk) {
- read_unlock_bh(&l2tp_ip_lock);
- goto discard;
- }
+ iph = (struct iphdr *)skb_network_header(skb);
- sock_hold(sk);
+ read_lock_bh(&l2tp_ip_lock);
+ sk = __l2tp_ip_bind_lookup(net, iph->daddr, 0, tunnel_id);
+ if (!sk) {
read_unlock_bh(&l2tp_ip_lock);
+ goto discard;
}
+ sock_hold(sk);
+ read_unlock_bh(&l2tp_ip_lock);
if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_put;
@@ -269,15 +263,9 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
if (addr->l2tp_family != AF_INET)
return -EINVAL;
- ret = -EADDRINUSE;
- read_lock_bh(&l2tp_ip_lock);
- if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr,
- sk->sk_bound_dev_if, addr->l2tp_conn_id))
- goto out_in_use;
-
- read_unlock_bh(&l2tp_ip_lock);
-
lock_sock(sk);
+
+ ret = -EINVAL;
if (!sock_flag(sk, SOCK_ZAPPED))
goto out;
@@ -294,25 +282,28 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr;
if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
inet->inet_saddr = 0; /* Use device */
- sk_dst_reset(sk);
+ write_lock_bh(&l2tp_ip_lock);
+ if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr,
+ sk->sk_bound_dev_if, addr->l2tp_conn_id)) {
+ write_unlock_bh(&l2tp_ip_lock);
+ ret = -EADDRINUSE;
+ goto out;
+ }
+
+ sk_dst_reset(sk);
l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id;
- write_lock_bh(&l2tp_ip_lock);
sk_add_bind_node(sk, &l2tp_ip_bind_table);
sk_del_node_init(sk);
write_unlock_bh(&l2tp_ip_lock);
+
ret = 0;
sock_reset_flag(sk, SOCK_ZAPPED);
out:
release_sock(sk);
- return ret;
-
-out_in_use:
- read_unlock_bh(&l2tp_ip_lock);
-
return ret;
}
@@ -321,21 +312,24 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr;
int rc;
- if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
- return -EINVAL;
-
if (addr_len < sizeof(*lsa))
return -EINVAL;
if (ipv4_is_multicast(lsa->l2tp_addr.s_addr))
return -EINVAL;
- rc = ip4_datagram_connect(sk, uaddr, addr_len);
- if (rc < 0)
- return rc;
-
lock_sock(sk);
+ /* Must bind first - autobinding does not work */
+ if (sock_flag(sk, SOCK_ZAPPED)) {
+ rc = -EINVAL;
+ goto out_sk;
+ }
+
+ rc = __ip4_datagram_connect(sk, uaddr, addr_len);
+ if (rc < 0)
+ goto out_sk;
+
l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;
write_lock_bh(&l2tp_ip_lock);
@@ -343,7 +337,9 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
sk_add_bind_node(sk, &l2tp_ip_bind_table);
write_unlock_bh(&l2tp_ip_lock);
+out_sk:
release_sock(sk);
+
return rc;
}
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 2b5230ef8536..5bb5337e74fc 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -134,6 +134,7 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
unsigned char *ptr, *optr;
struct l2tp_session *session;
struct l2tp_tunnel *tunnel = NULL;
+ struct ipv6hdr *iph;
int length;
if (!pskb_may_pull(skb, 4))
@@ -193,24 +194,16 @@ pass_up:
goto discard;
tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
- tunnel = l2tp_tunnel_find(net, tunnel_id);
- if (tunnel) {
- sk = tunnel->sock;
- sock_hold(sk);
- } else {
- struct ipv6hdr *iph = ipv6_hdr(skb);
-
- read_lock_bh(&l2tp_ip6_lock);
- sk = __l2tp_ip6_bind_lookup(net, &iph->daddr,
- 0, tunnel_id);
- if (!sk) {
- read_unlock_bh(&l2tp_ip6_lock);
- goto discard;
- }
+ iph = ipv6_hdr(skb);
- sock_hold(sk);
+ read_lock_bh(&l2tp_ip6_lock);
+ sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, 0, tunnel_id);
+ if (!sk) {
read_unlock_bh(&l2tp_ip6_lock);
+ goto discard;
}
+ sock_hold(sk);
+ read_unlock_bh(&l2tp_ip6_lock);
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_put;
@@ -278,6 +271,7 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
struct sockaddr_l2tpip6 *addr = (struct sockaddr_l2tpip6 *) uaddr;
struct net *net = sock_net(sk);
__be32 v4addr = 0;
+ int bound_dev_if;
int addr_type;
int err;
@@ -296,13 +290,6 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
if (addr_type & IPV6_ADDR_MULTICAST)
return -EADDRNOTAVAIL;
- err = -EADDRINUSE;
- read_lock_bh(&l2tp_ip6_lock);
- if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr,
- sk->sk_bound_dev_if, addr->l2tp_conn_id))
- goto out_in_use;
- read_unlock_bh(&l2tp_ip6_lock);
-
lock_sock(sk);
err = -EINVAL;
@@ -312,28 +299,25 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
if (sk->sk_state != TCP_CLOSE)
goto out_unlock;
+ bound_dev_if = sk->sk_bound_dev_if;
+
/* Check if the address belongs to the host. */
rcu_read_lock();
if (addr_type != IPV6_ADDR_ANY) {
struct net_device *dev = NULL;
if (addr_type & IPV6_ADDR_LINKLOCAL) {
- if (addr_len >= sizeof(struct sockaddr_in6) &&
- addr->l2tp_scope_id) {
- /* Override any existing binding, if another
- * one is supplied by user.
- */
- sk->sk_bound_dev_if = addr->l2tp_scope_id;
- }
+ if (addr->l2tp_scope_id)
+ bound_dev_if = addr->l2tp_scope_id;
/* Binding to link-local address requires an
- interface */
- if (!sk->sk_bound_dev_if)
+ * interface.
+ */
+ if (!bound_dev_if)
goto out_unlock_rcu;
err = -ENODEV;
- dev = dev_get_by_index_rcu(sock_net(sk),
- sk->sk_bound_dev_if);
+ dev = dev_get_by_index_rcu(sock_net(sk), bound_dev_if);
if (!dev)
goto out_unlock_rcu;
}
@@ -348,13 +332,22 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
}
rcu_read_unlock();
- inet->inet_rcv_saddr = inet->inet_saddr = v4addr;
+ write_lock_bh(&l2tp_ip6_lock);
+ if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr, bound_dev_if,
+ addr->l2tp_conn_id)) {
+ write_unlock_bh(&l2tp_ip6_lock);
+ err = -EADDRINUSE;
+ goto out_unlock;
+ }
+
+ inet->inet_saddr = v4addr;
+ inet->inet_rcv_saddr = v4addr;
+ sk->sk_bound_dev_if = bound_dev_if;
sk->sk_v6_rcv_saddr = addr->l2tp_addr;
np->saddr = addr->l2tp_addr;
l2tp_ip6_sk(sk)->conn_id = addr->l2tp_conn_id;
- write_lock_bh(&l2tp_ip6_lock);
sk_add_bind_node(sk, &l2tp_ip6_bind_table);
sk_del_node_init(sk);
write_unlock_bh(&l2tp_ip6_lock);
@@ -367,10 +360,7 @@ out_unlock_rcu:
rcu_read_unlock();
out_unlock:
release_sock(sk);
- return err;
-out_in_use:
- read_unlock_bh(&l2tp_ip6_lock);
return err;
}
@@ -383,9 +373,6 @@ static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr,
int addr_type;
int rc;
- if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
- return -EINVAL;
-
if (addr_len < sizeof(*lsa))
return -EINVAL;
@@ -402,10 +389,18 @@ static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr,
return -EINVAL;
}
- rc = ip6_datagram_connect(sk, uaddr, addr_len);
-
lock_sock(sk);
+ /* Must bind first - autobinding does not work */
+ if (sock_flag(sk, SOCK_ZAPPED)) {
+ rc = -EINVAL;
+ goto out_sk;
+ }
+
+ rc = __ip6_datagram_connect(sk, uaddr, addr_len);
+ if (rc < 0)
+ goto out_sk;
+
l2tp_ip6_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;
write_lock_bh(&l2tp_ip6_lock);
@@ -413,6 +408,7 @@ static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr,
sk_add_bind_node(sk, &l2tp_ip6_bind_table);
write_unlock_bh(&l2tp_ip6_lock);
+out_sk:
release_sock(sk);
return rc;
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index fb3248ff8b48..d3a84a181348 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -55,7 +55,8 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq,
/* Accessed under genl lock */
static const struct l2tp_nl_cmd_ops *l2tp_nl_cmd_ops[__L2TP_PWTYPE_MAX];
-static struct l2tp_session *l2tp_nl_session_find(struct genl_info *info)
+static struct l2tp_session *l2tp_nl_session_get(struct genl_info *info,
+ bool do_ref)
{
u32 tunnel_id;
u32 session_id;
@@ -66,14 +67,17 @@ static struct l2tp_session *l2tp_nl_session_find(struct genl_info *info)
if (info->attrs[L2TP_ATTR_IFNAME]) {
ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]);
- session = l2tp_session_find_by_ifname(net, ifname);
+ session = l2tp_session_get_by_ifname(net, ifname, do_ref);
} else if ((info->attrs[L2TP_ATTR_SESSION_ID]) &&
(info->attrs[L2TP_ATTR_CONN_ID])) {
tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]);
- tunnel = l2tp_tunnel_find(net, tunnel_id);
- if (tunnel)
- session = l2tp_session_find(net, tunnel, session_id);
+ tunnel = l2tp_tunnel_get(net, tunnel_id);
+ if (tunnel) {
+ session = l2tp_session_get(net, tunnel, session_id,
+ do_ref);
+ l2tp_tunnel_dec_refcount(tunnel);
+ }
}
return session;
@@ -276,8 +280,8 @@ static int l2tp_nl_cmd_tunnel_delete(struct sk_buff *skb, struct genl_info *info
}
tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
- tunnel = l2tp_tunnel_find(net, tunnel_id);
- if (tunnel == NULL) {
+ tunnel = l2tp_tunnel_get(net, tunnel_id);
+ if (!tunnel) {
ret = -ENODEV;
goto out;
}
@@ -287,6 +291,8 @@ static int l2tp_nl_cmd_tunnel_delete(struct sk_buff *skb, struct genl_info *info
l2tp_tunnel_delete(tunnel);
+ l2tp_tunnel_dec_refcount(tunnel);
+
out:
return ret;
}
@@ -304,8 +310,8 @@ static int l2tp_nl_cmd_tunnel_modify(struct sk_buff *skb, struct genl_info *info
}
tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
- tunnel = l2tp_tunnel_find(net, tunnel_id);
- if (tunnel == NULL) {
+ tunnel = l2tp_tunnel_get(net, tunnel_id);
+ if (!tunnel) {
ret = -ENODEV;
goto out;
}
@@ -316,6 +322,8 @@ static int l2tp_nl_cmd_tunnel_modify(struct sk_buff *skb, struct genl_info *info
ret = l2tp_tunnel_notify(&l2tp_nl_family, info,
tunnel, L2TP_CMD_TUNNEL_MODIFY);
+ l2tp_tunnel_dec_refcount(tunnel);
+
out:
return ret;
}
@@ -420,34 +428,37 @@ static int l2tp_nl_cmd_tunnel_get(struct sk_buff *skb, struct genl_info *info)
if (!info->attrs[L2TP_ATTR_CONN_ID]) {
ret = -EINVAL;
- goto out;
+ goto err;
}
tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
- tunnel = l2tp_tunnel_find(net, tunnel_id);
- if (tunnel == NULL) {
- ret = -ENODEV;
- goto out;
- }
-
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg) {
ret = -ENOMEM;
- goto out;
+ goto err;
+ }
+
+ tunnel = l2tp_tunnel_get(net, tunnel_id);
+ if (!tunnel) {
+ ret = -ENODEV;
+ goto err_nlmsg;
}
ret = l2tp_nl_tunnel_send(msg, info->snd_portid, info->snd_seq,
NLM_F_ACK, tunnel, L2TP_CMD_TUNNEL_GET);
if (ret < 0)
- goto err_out;
+ goto err_nlmsg_tunnel;
+
+ l2tp_tunnel_dec_refcount(tunnel);
return genlmsg_unicast(net, msg, info->snd_portid);
-err_out:
+err_nlmsg_tunnel:
+ l2tp_tunnel_dec_refcount(tunnel);
+err_nlmsg:
nlmsg_free(msg);
-
-out:
+err:
return ret;
}
@@ -491,8 +502,9 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
ret = -EINVAL;
goto out;
}
+
tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
- tunnel = l2tp_tunnel_find(net, tunnel_id);
+ tunnel = l2tp_tunnel_get(net, tunnel_id);
if (!tunnel) {
ret = -ENODEV;
goto out;
@@ -500,29 +512,24 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
if (!info->attrs[L2TP_ATTR_SESSION_ID]) {
ret = -EINVAL;
- goto out;
+ goto out_tunnel;
}
session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]);
- session = l2tp_session_find(net, tunnel, session_id);
- if (session) {
- ret = -EEXIST;
- goto out;
- }
if (!info->attrs[L2TP_ATTR_PEER_SESSION_ID]) {
ret = -EINVAL;
- goto out;
+ goto out_tunnel;
}
peer_session_id = nla_get_u32(info->attrs[L2TP_ATTR_PEER_SESSION_ID]);
if (!info->attrs[L2TP_ATTR_PW_TYPE]) {
ret = -EINVAL;
- goto out;
+ goto out_tunnel;
}
cfg.pw_type = nla_get_u16(info->attrs[L2TP_ATTR_PW_TYPE]);
if (cfg.pw_type >= __L2TP_PWTYPE_MAX) {
ret = -EINVAL;
- goto out;
+ goto out_tunnel;
}
if (tunnel->version > 2) {
@@ -544,7 +551,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
u16 len = nla_len(info->attrs[L2TP_ATTR_COOKIE]);
if (len > 8) {
ret = -EINVAL;
- goto out;
+ goto out_tunnel;
}
cfg.cookie_len = len;
memcpy(&cfg.cookie[0], nla_data(info->attrs[L2TP_ATTR_COOKIE]), len);
@@ -553,7 +560,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
u16 len = nla_len(info->attrs[L2TP_ATTR_PEER_COOKIE]);
if (len > 8) {
ret = -EINVAL;
- goto out;
+ goto out_tunnel;
}
cfg.peer_cookie_len = len;
memcpy(&cfg.peer_cookie[0], nla_data(info->attrs[L2TP_ATTR_PEER_COOKIE]), len);
@@ -596,7 +603,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
if ((l2tp_nl_cmd_ops[cfg.pw_type] == NULL) ||
(l2tp_nl_cmd_ops[cfg.pw_type]->session_create == NULL)) {
ret = -EPROTONOSUPPORT;
- goto out;
+ goto out_tunnel;
}
/* Check that pseudowire-specific params are present */
@@ -606,7 +613,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
case L2TP_PWTYPE_ETH_VLAN:
if (!info->attrs[L2TP_ATTR_VLAN_ID]) {
ret = -EINVAL;
- goto out;
+ goto out_tunnel;
}
break;
case L2TP_PWTYPE_ETH:
@@ -620,18 +627,22 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
break;
}
- ret = -EPROTONOSUPPORT;
- if (l2tp_nl_cmd_ops[cfg.pw_type]->session_create)
- ret = (*l2tp_nl_cmd_ops[cfg.pw_type]->session_create)(net, tunnel_id,
- session_id, peer_session_id, &cfg);
+ ret = l2tp_nl_cmd_ops[cfg.pw_type]->session_create(net, tunnel,
+ session_id,
+ peer_session_id,
+ &cfg);
if (ret >= 0) {
- session = l2tp_session_find(net, tunnel, session_id);
- if (session)
+ session = l2tp_session_get(net, tunnel, session_id, false);
+ if (session) {
ret = l2tp_session_notify(&l2tp_nl_family, info, session,
L2TP_CMD_SESSION_CREATE);
+ l2tp_session_dec_refcount(session);
+ }
}
+out_tunnel:
+ l2tp_tunnel_dec_refcount(tunnel);
out:
return ret;
}
@@ -642,7 +653,7 @@ static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *inf
struct l2tp_session *session;
u16 pw_type;
- session = l2tp_nl_session_find(info);
+ session = l2tp_nl_session_get(info, true);
if (session == NULL) {
ret = -ENODEV;
goto out;
@@ -656,6 +667,10 @@ static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *inf
if (l2tp_nl_cmd_ops[pw_type] && l2tp_nl_cmd_ops[pw_type]->session_delete)
ret = (*l2tp_nl_cmd_ops[pw_type]->session_delete)(session);
+ if (session->deref)
+ session->deref(session);
+ l2tp_session_dec_refcount(session);
+
out:
return ret;
}
@@ -665,7 +680,7 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf
int ret = 0;
struct l2tp_session *session;
- session = l2tp_nl_session_find(info);
+ session = l2tp_nl_session_get(info, false);
if (session == NULL) {
ret = -ENODEV;
goto out;
@@ -700,6 +715,8 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf
ret = l2tp_session_notify(&l2tp_nl_family, info,
session, L2TP_CMD_SESSION_MODIFY);
+ l2tp_session_dec_refcount(session);
+
out:
return ret;
}
@@ -786,29 +803,34 @@ static int l2tp_nl_cmd_session_get(struct sk_buff *skb, struct genl_info *info)
struct sk_buff *msg;
int ret;
- session = l2tp_nl_session_find(info);
+ session = l2tp_nl_session_get(info, false);
if (session == NULL) {
ret = -ENODEV;
- goto out;
+ goto err;
}
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg) {
ret = -ENOMEM;
- goto out;
+ goto err_ref;
}
ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq,
0, session, L2TP_CMD_SESSION_GET);
if (ret < 0)
- goto err_out;
+ goto err_ref_msg;
- return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
+ ret = genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
-err_out:
- nlmsg_free(msg);
+ l2tp_session_dec_refcount(session);
-out:
+ return ret;
+
+err_ref_msg:
+ nlmsg_free(msg);
+err_ref:
+ l2tp_session_dec_refcount(session);
+err:
return ret;
}
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index bc5d6b8f8ede..8ff5352bb0e3 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -122,8 +122,11 @@
struct pppol2tp_session {
int owner; /* pid that opened the socket */
- struct sock *sock; /* Pointer to the session
+ struct mutex sk_lock; /* Protects .sk */
+ struct sock __rcu *sk; /* Pointer to the session
* PPPoX socket */
+ struct sock *__sk; /* Copy of .sk, for cleanup */
+ struct rcu_head rcu; /* For asynchronous release */
struct sock *tunnel_sock; /* Pointer to the tunnel UDP
* socket */
int flags; /* accessed by PPPIOCGFLAGS.
@@ -138,6 +141,24 @@ static const struct ppp_channel_ops pppol2tp_chan_ops = {
static const struct proto_ops pppol2tp_ops;
+/* Retrieves the pppol2tp socket associated to a session.
+ * A reference is held on the returned socket, so this function must be paired
+ * with sock_put().
+ */
+static struct sock *pppol2tp_session_get_sock(struct l2tp_session *session)
+{
+ struct pppol2tp_session *ps = l2tp_session_priv(session);
+ struct sock *sk;
+
+ rcu_read_lock();
+ sk = rcu_dereference(ps->sk);
+ if (sk)
+ sock_hold(sk);
+ rcu_read_unlock();
+
+ return sk;
+}
+
/* Helpers to obtain tunnel/session contexts from sockets.
*/
static inline struct l2tp_session *pppol2tp_sock_to_session(struct sock *sk)
@@ -224,13 +245,14 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int
/* If the socket is bound, send it in to PPP's input queue. Otherwise
* queue it on the session socket.
*/
- sk = ps->sock;
+ rcu_read_lock();
+ sk = rcu_dereference(ps->sk);
if (sk == NULL)
goto no_sock;
if (sk->sk_state & PPPOX_BOUND) {
struct pppox_sock *po;
- l2tp_dbg(session, PPPOL2TP_MSG_DATA,
+ l2tp_dbg(session, L2TP_MSG_DATA,
"%s: recv %d byte data frame, passing to ppp\n",
session->name, data_len);
@@ -253,7 +275,7 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int
po = pppox_sk(sk);
ppp_input(&po->chan, skb);
} else {
- l2tp_dbg(session, PPPOL2TP_MSG_DATA,
+ l2tp_dbg(session, L2TP_MSG_DATA,
"%s: recv %d byte data frame, passing to L2TP socket\n",
session->name, data_len);
@@ -262,30 +284,16 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int
kfree_skb(skb);
}
}
+ rcu_read_unlock();
return;
no_sock:
- l2tp_info(session, PPPOL2TP_MSG_DATA, "%s: no socket\n", session->name);
+ rcu_read_unlock();
+ l2tp_info(session, L2TP_MSG_DATA, "%s: no socket\n", session->name);
kfree_skb(skb);
}
-static void pppol2tp_session_sock_hold(struct l2tp_session *session)
-{
- struct pppol2tp_session *ps = l2tp_session_priv(session);
-
- if (ps->sock)
- sock_hold(ps->sock);
-}
-
-static void pppol2tp_session_sock_put(struct l2tp_session *session)
-{
- struct pppol2tp_session *ps = l2tp_session_priv(session);
-
- if (ps->sock)
- sock_put(ps->sock);
-}
-
/************************************************************************
* Transmit handling
***********************************************************************/
@@ -446,17 +454,16 @@ abort:
*/
static void pppol2tp_session_close(struct l2tp_session *session)
{
- struct pppol2tp_session *ps = l2tp_session_priv(session);
- struct sock *sk = ps->sock;
- struct socket *sock = sk->sk_socket;
+ struct sock *sk;
BUG_ON(session->magic != L2TP_SESSION_MAGIC);
- if (sock)
- inet_shutdown(sock, SEND_SHUTDOWN);
-
- /* Don't let the session go away before our socket does */
- l2tp_session_inc_refcount(session);
+ sk = pppol2tp_session_get_sock(session);
+ if (sk) {
+ if (sk->sk_socket)
+ inet_shutdown(sk->sk_socket, SEND_SHUTDOWN);
+ sock_put(sk);
+ }
}
/* Really kill the session socket. (Called from sock_put() if
@@ -476,6 +483,14 @@ static void pppol2tp_session_destruct(struct sock *sk)
}
}
+static void pppol2tp_put_sk(struct rcu_head *head)
+{
+ struct pppol2tp_session *ps;
+
+ ps = container_of(head, typeof(*ps), rcu);
+ sock_put(ps->__sk);
+}
+
/* Called when the PPPoX socket (session) is closed.
*/
static int pppol2tp_release(struct socket *sock)
@@ -501,11 +516,23 @@ static int pppol2tp_release(struct socket *sock)
session = pppol2tp_sock_to_session(sk);
- /* Purge any queued data */
if (session != NULL) {
- __l2tp_session_unhash(session);
- l2tp_session_queue_purge(session);
- sock_put(sk);
+ struct pppol2tp_session *ps;
+
+ l2tp_session_delete(session);
+
+ ps = l2tp_session_priv(session);
+ mutex_lock(&ps->sk_lock);
+ ps->__sk = rcu_dereference_protected(ps->sk,
+ lockdep_is_held(&ps->sk_lock));
+ RCU_INIT_POINTER(ps->sk, NULL);
+ mutex_unlock(&ps->sk_lock);
+ call_rcu(&ps->rcu, pppol2tp_put_sk);
+
+ /* Rely on the sock_put() call at the end of the function for
+ * dropping the reference held by pppol2tp_sock_to_session().
+ * The last reference will be dropped by pppol2tp_put_sk().
+ */
}
release_sock(sk);
@@ -572,16 +599,47 @@ out:
static void pppol2tp_show(struct seq_file *m, void *arg)
{
struct l2tp_session *session = arg;
- struct pppol2tp_session *ps = l2tp_session_priv(session);
+ struct sock *sk;
+
+ sk = pppol2tp_session_get_sock(session);
+ if (sk) {
+ struct pppox_sock *po = pppox_sk(sk);
- if (ps) {
- struct pppox_sock *po = pppox_sk(ps->sock);
- if (po)
- seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan));
+ seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan));
+ sock_put(sk);
}
}
#endif
+static void pppol2tp_session_init(struct l2tp_session *session)
+{
+ struct pppol2tp_session *ps;
+ struct dst_entry *dst;
+
+ session->recv_skb = pppol2tp_recv;
+ session->session_close = pppol2tp_session_close;
+#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
+ session->show = pppol2tp_show;
+#endif
+
+ ps = l2tp_session_priv(session);
+ mutex_init(&ps->sk_lock);
+ ps->tunnel_sock = session->tunnel->sock;
+ ps->owner = current->pid;
+
+ /* If PMTU discovery was enabled, use the MTU that was discovered */
+ dst = sk_dst_get(session->tunnel->sock);
+ if (dst) {
+ u32 pmtu = dst_mtu(dst);
+
+ if (pmtu) {
+ session->mtu = pmtu - PPPOL2TP_HEADER_OVERHEAD;
+ session->mru = pmtu - PPPOL2TP_HEADER_OVERHEAD;
+ }
+ dst_release(dst);
+ }
+}
+
/* connect() handler. Attach a PPPoX socket to a tunnel UDP socket
*/
static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
@@ -593,7 +651,6 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
struct l2tp_session *session = NULL;
struct l2tp_tunnel *tunnel;
struct pppol2tp_session *ps;
- struct dst_entry *dst;
struct l2tp_session_cfg cfg = { 0, };
int error = 0;
u32 tunnel_id, peer_tunnel_id;
@@ -715,13 +772,17 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
/* Using a pre-existing session is fine as long as it hasn't
* been connected yet.
*/
- if (ps->sock) {
+ mutex_lock(&ps->sk_lock);
+ if (rcu_dereference_protected(ps->sk,
+ lockdep_is_held(&ps->sk_lock))) {
+ mutex_unlock(&ps->sk_lock);
error = -EEXIST;
goto end;
}
/* consistency checks */
if (ps->tunnel_sock != tunnel->sock) {
+ mutex_unlock(&ps->sk_lock);
error = -EEXIST;
goto end;
}
@@ -737,35 +798,19 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
error = PTR_ERR(session);
goto end;
}
- }
-
- /* Associate session with its PPPoL2TP socket */
- ps = l2tp_session_priv(session);
- ps->owner = current->pid;
- ps->sock = sk;
- ps->tunnel_sock = tunnel->sock;
- session->recv_skb = pppol2tp_recv;
- session->session_close = pppol2tp_session_close;
-#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
- session->show = pppol2tp_show;
-#endif
-
- /* We need to know each time a skb is dropped from the reorder
- * queue.
- */
- session->ref = pppol2tp_session_sock_hold;
- session->deref = pppol2tp_session_sock_put;
-
- /* If PMTU discovery was enabled, use the MTU that was discovered */
- dst = sk_dst_get(tunnel->sock);
- if (dst != NULL) {
- u32 pmtu = dst_mtu(dst);
+ pppol2tp_session_init(session);
+ ps = l2tp_session_priv(session);
+ l2tp_session_inc_refcount(session);
- if (pmtu != 0)
- session->mtu = session->mru = pmtu -
- PPPOL2TP_HEADER_OVERHEAD;
- dst_release(dst);
+ mutex_lock(&ps->sk_lock);
+ error = l2tp_session_register(session, tunnel);
+ if (error < 0) {
+ mutex_unlock(&ps->sk_lock);
+ kfree(session);
+ goto end;
+ }
+ drop_refcnt = true;
}
/* Special case: if source & dest session_id == 0x0000, this
@@ -790,14 +835,25 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
po->chan.mtu = session->mtu;
error = ppp_register_net_channel(sock_net(sk), &po->chan);
- if (error)
+ if (error) {
+ mutex_unlock(&ps->sk_lock);
goto end;
+ }
out_no_ppp:
/* This is how we get the session context from the socket. */
sk->sk_user_data = session;
+ rcu_assign_pointer(ps->sk, sk);
+ mutex_unlock(&ps->sk_lock);
+
+ /* Keep the reference we've grabbed on the session: sk doesn't expect
+ * the session to disappear. pppol2tp_session_destruct() is responsible
+ * for dropping it.
+ */
+ drop_refcnt = false;
+
sk->sk_state = PPPOX_CONNECTED;
- l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: created\n",
+ l2tp_info(session, L2TP_MSG_CONTROL, "%s: created\n",
session->name);
end:
@@ -810,25 +866,19 @@ end:
#ifdef CONFIG_L2TP_V3
-/* Called when creating sessions via the netlink interface.
- */
-static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
+/* Called when creating sessions via the netlink interface. */
+static int pppol2tp_session_create(struct net *net, struct l2tp_tunnel *tunnel,
+ u32 session_id, u32 peer_session_id,
+ struct l2tp_session_cfg *cfg)
{
int error;
- struct l2tp_tunnel *tunnel;
struct l2tp_session *session;
- struct pppol2tp_session *ps;
-
- tunnel = l2tp_tunnel_find(net, tunnel_id);
-
- /* Error if we can't find the tunnel */
- error = -ENOENT;
- if (tunnel == NULL)
- goto out;
/* Error if tunnel socket is not prepped */
- if (tunnel->sock == NULL)
- goto out;
+ if (!tunnel->sock) {
+ error = -ENOENT;
+ goto err;
+ }
/* Default MTU values. */
if (cfg->mtu == 0)
@@ -842,18 +892,20 @@ static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_i
peer_session_id, cfg);
if (IS_ERR(session)) {
error = PTR_ERR(session);
- goto out;
+ goto err;
}
- ps = l2tp_session_priv(session);
- ps->tunnel_sock = tunnel->sock;
+ pppol2tp_session_init(session);
- l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: created\n",
- session->name);
+ error = l2tp_session_register(session, tunnel);
+ if (error < 0)
+ goto err_sess;
- error = 0;
+ return 0;
-out:
+err_sess:
+ kfree(session);
+err:
return error;
}
@@ -1010,16 +1062,14 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
struct l2tp_tunnel *tunnel = session->tunnel;
struct pppol2tp_ioc_stats stats;
- l2tp_dbg(session, PPPOL2TP_MSG_CONTROL,
+ l2tp_dbg(session, L2TP_MSG_CONTROL,
"%s: pppol2tp_session_ioctl(cmd=%#x, arg=%#lx)\n",
session->name, cmd, arg);
- sk = ps->sock;
+ sk = pppol2tp_session_get_sock(session);
if (!sk)
return -EBADR;
- sock_hold(sk);
-
switch (cmd) {
case SIOCGIFMTU:
err = -ENXIO;
@@ -1033,7 +1083,7 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
if (copy_to_user((void __user *) arg, &ifr, sizeof(struct ifreq)))
break;
- l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get mtu=%d\n",
+ l2tp_info(session, L2TP_MSG_CONTROL, "%s: get mtu=%d\n",
session->name, session->mtu);
err = 0;
break;
@@ -1049,7 +1099,7 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
session->mtu = ifr.ifr_mtu;
- l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set mtu=%d\n",
+ l2tp_info(session, L2TP_MSG_CONTROL, "%s: set mtu=%d\n",
session->name, session->mtu);
err = 0;
break;
@@ -1063,7 +1113,7 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
if (put_user(session->mru, (int __user *) arg))
break;
- l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get mru=%d\n",
+ l2tp_info(session, L2TP_MSG_CONTROL, "%s: get mru=%d\n",
session->name, session->mru);
err = 0;
break;
@@ -1078,7 +1128,7 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
break;
session->mru = val;
- l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set mru=%d\n",
+ l2tp_info(session, L2TP_MSG_CONTROL, "%s: set mru=%d\n",
session->name, session->mru);
err = 0;
break;
@@ -1088,7 +1138,7 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
if (put_user(ps->flags, (int __user *) arg))
break;
- l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get flags=%d\n",
+ l2tp_info(session, L2TP_MSG_CONTROL, "%s: get flags=%d\n",
session->name, ps->flags);
err = 0;
break;
@@ -1098,7 +1148,7 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
if (get_user(val, (int __user *) arg))
break;
ps->flags = val;
- l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set flags=%d\n",
+ l2tp_info(session, L2TP_MSG_CONTROL, "%s: set flags=%d\n",
session->name, ps->flags);
err = 0;
break;
@@ -1115,7 +1165,7 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
if (copy_to_user((void __user *) arg, &stats,
sizeof(stats)))
break;
- l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get L2TP stats\n",
+ l2tp_info(session, L2TP_MSG_CONTROL, "%s: get L2TP stats\n",
session->name);
err = 0;
break;
@@ -1143,7 +1193,7 @@ static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel,
struct sock *sk;
struct pppol2tp_ioc_stats stats;
- l2tp_dbg(tunnel, PPPOL2TP_MSG_CONTROL,
+ l2tp_dbg(tunnel, L2TP_MSG_CONTROL,
"%s: pppol2tp_tunnel_ioctl(cmd=%#x, arg=%#lx)\n",
tunnel->name, cmd, arg);
@@ -1186,7 +1236,7 @@ static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel,
err = -EFAULT;
break;
}
- l2tp_info(tunnel, PPPOL2TP_MSG_CONTROL, "%s: get L2TP stats\n",
+ l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: get L2TP stats\n",
tunnel->name);
err = 0;
break;
@@ -1276,7 +1326,7 @@ static int pppol2tp_tunnel_setsockopt(struct sock *sk,
switch (optname) {
case PPPOL2TP_SO_DEBUG:
tunnel->debug = val;
- l2tp_info(tunnel, PPPOL2TP_MSG_CONTROL, "%s: set debug=%x\n",
+ l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: set debug=%x\n",
tunnel->name, tunnel->debug);
break;
@@ -1295,7 +1345,6 @@ static int pppol2tp_session_setsockopt(struct sock *sk,
int optname, int val)
{
int err = 0;
- struct pppol2tp_session *ps = l2tp_session_priv(session);
switch (optname) {
case PPPOL2TP_SO_RECVSEQ:
@@ -1304,7 +1353,7 @@ static int pppol2tp_session_setsockopt(struct sock *sk,
break;
}
session->recv_seq = val ? -1 : 0;
- l2tp_info(session, PPPOL2TP_MSG_CONTROL,
+ l2tp_info(session, L2TP_MSG_CONTROL,
"%s: set recv_seq=%d\n",
session->name, session->recv_seq);
break;
@@ -1316,13 +1365,13 @@ static int pppol2tp_session_setsockopt(struct sock *sk,
}
session->send_seq = val ? -1 : 0;
{
- struct sock *ssk = ps->sock;
- struct pppox_sock *po = pppox_sk(ssk);
+ struct pppox_sock *po = pppox_sk(sk);
+
po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ :
PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
}
l2tp_session_set_header_len(session, session->tunnel->version);
- l2tp_info(session, PPPOL2TP_MSG_CONTROL,
+ l2tp_info(session, L2TP_MSG_CONTROL,
"%s: set send_seq=%d\n",
session->name, session->send_seq);
break;
@@ -1333,20 +1382,20 @@ static int pppol2tp_session_setsockopt(struct sock *sk,
break;
}
session->lns_mode = val ? -1 : 0;
- l2tp_info(session, PPPOL2TP_MSG_CONTROL,
+ l2tp_info(session, L2TP_MSG_CONTROL,
"%s: set lns_mode=%d\n",
session->name, session->lns_mode);
break;
case PPPOL2TP_SO_DEBUG:
session->debug = val;
- l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set debug=%x\n",
+ l2tp_info(session, L2TP_MSG_CONTROL, "%s: set debug=%x\n",
session->name, session->debug);
break;
case PPPOL2TP_SO_REORDERTO:
session->reorder_timeout = msecs_to_jiffies(val);
- l2tp_info(session, PPPOL2TP_MSG_CONTROL,
+ l2tp_info(session, L2TP_MSG_CONTROL,
"%s: set reorder_timeout=%d\n",
session->name, session->reorder_timeout);
break;
@@ -1427,7 +1476,7 @@ static int pppol2tp_tunnel_getsockopt(struct sock *sk,
switch (optname) {
case PPPOL2TP_SO_DEBUG:
*val = tunnel->debug;
- l2tp_info(tunnel, PPPOL2TP_MSG_CONTROL, "%s: get debug=%x\n",
+ l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: get debug=%x\n",
tunnel->name, tunnel->debug);
break;
@@ -1450,31 +1499,31 @@ static int pppol2tp_session_getsockopt(struct sock *sk,
switch (optname) {
case PPPOL2TP_SO_RECVSEQ:
*val = session->recv_seq;
- l2tp_info(session, PPPOL2TP_MSG_CONTROL,
+ l2tp_info(session, L2TP_MSG_CONTROL,
"%s: get recv_seq=%d\n", session->name, *val);
break;
case PPPOL2TP_SO_SENDSEQ:
*val = session->send_seq;
- l2tp_info(session, PPPOL2TP_MSG_CONTROL,
+ l2tp_info(session, L2TP_MSG_CONTROL,
"%s: get send_seq=%d\n", session->name, *val);
break;
case PPPOL2TP_SO_LNSMODE:
*val = session->lns_mode;
- l2tp_info(session, PPPOL2TP_MSG_CONTROL,
+ l2tp_info(session, L2TP_MSG_CONTROL,
"%s: get lns_mode=%d\n", session->name, *val);
break;
case PPPOL2TP_SO_DEBUG:
*val = session->debug;
- l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get debug=%d\n",
+ l2tp_info(session, L2TP_MSG_CONTROL, "%s: get debug=%d\n",
session->name, *val);
break;
case PPPOL2TP_SO_REORDERTO:
*val = (int) jiffies_to_msecs(session->reorder_timeout);
- l2tp_info(session, PPPOL2TP_MSG_CONTROL,
+ l2tp_info(session, L2TP_MSG_CONTROL,
"%s: get reorder_timeout=%d\n", session->name, *val);
break;
@@ -1653,8 +1702,9 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
{
struct l2tp_session *session = v;
struct l2tp_tunnel *tunnel = session->tunnel;
- struct pppol2tp_session *ps = l2tp_session_priv(session);
- struct pppox_sock *po = pppox_sk(ps->sock);
+ unsigned char state;
+ char user_data_ok;
+ struct sock *sk;
u32 ip = 0;
u16 port = 0;
@@ -1664,6 +1714,15 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
port = ntohs(inet->inet_sport);
}
+ sk = pppol2tp_session_get_sock(session);
+ if (sk) {
+ state = sk->sk_state;
+ user_data_ok = (session == sk->sk_user_data) ? 'Y' : 'N';
+ } else {
+ state = 0;
+ user_data_ok = 'N';
+ }
+
seq_printf(m, " SESSION '%s' %08X/%d %04X/%04X -> "
"%04X/%04X %d %c\n",
session->name, ip, port,
@@ -1671,9 +1730,7 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
session->session_id,
tunnel->peer_tunnel_id,
session->peer_session_id,
- ps->sock->sk_state,
- (session == ps->sock->sk_user_data) ?
- 'Y' : 'N');
+ state, user_data_ok);
seq_printf(m, " %d/%d/%c/%c/%s %08x %u\n",
session->mtu, session->mru,
session->recv_seq ? 'R' : '-',
@@ -1690,8 +1747,12 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
atomic_long_read(&session->stats.rx_bytes),
atomic_long_read(&session->stats.rx_errors));
- if (po)
+ if (sk) {
+ struct pppox_sock *po = pppox_sk(sk);
+
seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan));
+ sock_put(sk);
+ }
}
static int pppol2tp_seq_show(struct seq_file *m, void *v)
diff --git a/net/socket.c b/net/socket.c
index 15bdba4211ad..88086d18c208 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -3304,3 +3304,49 @@ int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how)
return sock->ops->shutdown(sock, how);
}
EXPORT_SYMBOL(kernel_sock_shutdown);
+
+/* This routine returns the IP overhead imposed by a socket i.e.
+ * the length of the underlying IP header, depending on whether
+ * this is an IPv4 or IPv6 socket and the length from IP options turned
+ * on at the socket. Assumes that the caller has a lock on the socket.
+ */
+u32 kernel_sock_ip_overhead(struct sock *sk)
+{
+ struct inet_sock *inet;
+ struct ip_options_rcu *opt;
+ u32 overhead = 0;
+ bool owned_by_user;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct ipv6_pinfo *np;
+ struct ipv6_txoptions *optv6 = NULL;
+#endif /* IS_ENABLED(CONFIG_IPV6) */
+
+ if (!sk)
+ return overhead;
+
+ owned_by_user = sock_owned_by_user(sk);
+ switch (sk->sk_family) {
+ case AF_INET:
+ inet = inet_sk(sk);
+ overhead += sizeof(struct iphdr);
+ opt = rcu_dereference_protected(inet->inet_opt,
+ owned_by_user);
+ if (opt)
+ overhead += opt->opt.optlen;
+ return overhead;
+#if IS_ENABLED(CONFIG_IPV6)
+ case AF_INET6:
+ np = inet6_sk(sk);
+ overhead += sizeof(struct ipv6hdr);
+ if (np)
+ optv6 = rcu_dereference_protected(np->opt,
+ owned_by_user);
+ if (optv6)
+ overhead += (optv6->opt_flen + optv6->opt_nflen);
+ return overhead;
+#endif /* IS_ENABLED(CONFIG_IPV6) */
+ default: /* Returns 0 overhead if the socket is not ipv4 or ipv6 */
+ return overhead;
+ }
+}
+EXPORT_SYMBOL(kernel_sock_ip_overhead);
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
index 461f8d891579..44352b0b7510 100644
--- a/security/integrity/evm/evm_crypto.c
+++ b/security/integrity/evm/evm_crypto.c
@@ -47,7 +47,7 @@ static struct shash_desc *init_desc(char type)
algo = evm_hash;
}
- if (*tfm == NULL) {
+ if (IS_ERR_OR_NULL(*tfm)) {
mutex_lock(&mutex);
if (*tfm)
goto out;
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 950730709d28..ab8846e7e8ff 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -456,6 +456,7 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
no_delta_check:
if (runtime->status->hw_ptr == new_hw_ptr) {
+ runtime->hw_ptr_jiffies = curr_jiffies;
update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
return 0;
}