Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: /
Date: Wed, 27 May 2020 15:27:01
Message-Id: 1590593205.5224b72d03d54d71f867a8280b21db6616a390cf.mpagano@gentoo
commit:     5224b72d03d54d71f867a8280b21db6616a390cf
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed May 27 15:26:45 2020 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed May 27 15:26:45 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=5224b72d

Linux patch 4.4.225

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1224_linux-4.4.225.patch | 3799 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3803 insertions(+)

diff --git a/0000_README b/0000_README
index fb38e65..6e7060b 100644
--- a/0000_README
+++ b/0000_README
@@ -939,6 +939,10 @@ Patch: 1223_linux-4.4.224.patch
 From: http://www.kernel.org
 Desc: Linux 4.4.224
 
+Patch: 1224_linux-4.4.225.patch
+From: http://www.kernel.org
+Desc: Linux 4.4.225
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1224_linux-4.4.225.patch b/1224_linux-4.4.225.patch
new file mode 100644
index 0000000..f1d7a75
--- /dev/null
+++ b/1224_linux-4.4.225.patch
@@ -0,0 +1,3799 @@
+diff --git a/Documentation/networking/l2tp.txt b/Documentation/networking/l2tp.txt
+index 4650a00ed012..9bc271cdc9a8 100644
+--- a/Documentation/networking/l2tp.txt
++++ b/Documentation/networking/l2tp.txt
+@@ -177,10 +177,10 @@ setsockopt on the PPPoX socket to set a debug mask.
+
+ The following debug mask bits are available:
+
+-PPPOL2TP_MSG_DEBUG verbose debug (if compiled in)
+-PPPOL2TP_MSG_CONTROL userspace - kernel interface
+-PPPOL2TP_MSG_SEQ sequence numbers handling
+-PPPOL2TP_MSG_DATA data packets
++L2TP_MSG_DEBUG verbose debug (if compiled in)
++L2TP_MSG_CONTROL userspace - kernel interface
++L2TP_MSG_SEQ sequence numbers handling
++L2TP_MSG_DATA data packets
+
+ If enabled, files under a l2tp debugfs directory can be used to dump
+ kernel state about L2TP tunnels and sessions. To access it, the
+diff --git a/Makefile b/Makefile
+index f381af71fa32..4e5362707405 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 224
++SUBLEVEL = 225
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+
+diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
+index cc414382dab4..561b2ba6bc28 100644
+--- a/arch/arm/include/asm/futex.h
++++ b/arch/arm/include/asm/futex.h
+@@ -162,8 +162,13 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
+ preempt_enable();
+ #endif
+
+- if (!ret)
+- *oval = oldval;
++ /*
++ * Store unconditionally. If ret != 0 the extra store is the least
++ * of the worries but GCC cannot figure out that __futex_atomic_op()
++ * is either setting ret to -EFAULT or storing the old value in
++ * oldval which results in an uninitialized warning at the call site.
++ */
++ *oval = oldval;
+
+ return ret;
+ }
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index e1807296a1a0..33d2b5948d7f 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -319,6 +319,7 @@
+ #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7349 0x7349
+ #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_73F7 0x73f7
+ #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001 0xa001
++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_C002 0xc002
+
+ #define USB_VENDOR_ID_ELAN 0x04f3
+
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 9de379c1b3fd..56c4a81d3ea2 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -1300,6 +1300,9 @@ static const struct hid_device_id mt_devices[] = {
+ { .driver_data = MT_CLS_EGALAX_SERIAL,
+ MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
+ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) },
++ { .driver_data = MT_CLS_EGALAX,
++ MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
++ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_C002) },
+
+ /* Elitegroup panel */
+ { .driver_data = MT_CLS_SERIAL,
+diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
+index e56b774e7cf9..7584f292e2fd 100644
+--- a/drivers/i2c/i2c-dev.c
++++ b/drivers/i2c/i2c-dev.c
+@@ -22,6 +22,7 @@
+
+ /* The I2C_RDWR ioctl code is written by Kolja Waschk <waschk@×××××.de> */
+
++#include <linux/cdev.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/device.h>
+@@ -46,10 +47,11 @@
+ struct i2c_dev {
+ struct list_head list;
+ struct i2c_adapter *adap;
+- struct device *dev;
++ struct device dev;
++ struct cdev cdev;
+ };
+
+-#define I2C_MINORS 256
++#define I2C_MINORS MINORMASK
+ static LIST_HEAD(i2c_dev_list);
+ static DEFINE_SPINLOCK(i2c_dev_list_lock);
+
+@@ -89,12 +91,14 @@ static struct i2c_dev *get_free_i2c_dev(struct i2c_adapter *adap)
+ return i2c_dev;
+ }
+
+-static void return_i2c_dev(struct i2c_dev *i2c_dev)
++static void put_i2c_dev(struct i2c_dev *i2c_dev, bool del_cdev)
+ {
+ spin_lock(&i2c_dev_list_lock);
+ list_del(&i2c_dev->list);
+ spin_unlock(&i2c_dev_list_lock);
+- kfree(i2c_dev);
++ if (del_cdev)
++ cdev_device_del(&i2c_dev->cdev, &i2c_dev->dev);
++ put_device(&i2c_dev->dev);
+ }
+
+ static ssize_t name_show(struct device *dev,
+@@ -490,13 +494,8 @@ static int i2cdev_open(struct inode *inode, struct file *file)
+ unsigned int minor = iminor(inode);
+ struct i2c_client *client;
+ struct i2c_adapter *adap;
+- struct i2c_dev *i2c_dev;
+-
+- i2c_dev = i2c_dev_get_by_minor(minor);
+- if (!i2c_dev)
+- return -ENODEV;
+
+- adap = i2c_get_adapter(i2c_dev->adap->nr);
++ adap = i2c_get_adapter(minor);
+ if (!adap)
+ return -ENODEV;
+
+@@ -545,6 +544,14 @@ static const struct file_operations i2cdev_fops = {
+
+ static struct class *i2c_dev_class;
+
++static void i2cdev_dev_release(struct device *dev)
++{
++ struct i2c_dev *i2c_dev;
++
++ i2c_dev = container_of(dev, struct i2c_dev, dev);
++ kfree(i2c_dev);
++}
++
+ static int i2cdev_attach_adapter(struct device *dev, void *dummy)
+ {
+ struct i2c_adapter *adap;
+@@ -559,21 +566,25 @@ static int i2cdev_attach_adapter(struct device *dev, void *dummy)
+ if (IS_ERR(i2c_dev))
+ return PTR_ERR(i2c_dev);
+
+- /* register this i2c device with the driver core */
+- i2c_dev->dev = device_create(i2c_dev_class, &adap->dev,
+- MKDEV(I2C_MAJOR, adap->nr), NULL,
+- "i2c-%d", adap->nr);
+- if (IS_ERR(i2c_dev->dev)) {
+- res = PTR_ERR(i2c_dev->dev);
+- goto error;
++ cdev_init(&i2c_dev->cdev, &i2cdev_fops);
++ i2c_dev->cdev.owner = THIS_MODULE;
++
++ device_initialize(&i2c_dev->dev);
++ i2c_dev->dev.devt = MKDEV(I2C_MAJOR, adap->nr);
++ i2c_dev->dev.class = i2c_dev_class;
++ i2c_dev->dev.parent = &adap->dev;
++ i2c_dev->dev.release = i2cdev_dev_release;
++ dev_set_name(&i2c_dev->dev, "i2c-%d", adap->nr);
++
++ res = cdev_device_add(&i2c_dev->cdev, &i2c_dev->dev);
++ if (res) {
++ put_i2c_dev(i2c_dev, false);
++ return res;
+ }
+
+ pr_debug("i2c-dev: adapter [%s] registered as minor %d\n",
+ adap->name, adap->nr);
+ return 0;
+-error:
+- return_i2c_dev(i2c_dev);
+- return res;
+ }
+
+ static int i2cdev_detach_adapter(struct device *dev, void *dummy)
+@@ -589,8 +600,7 @@ static int i2cdev_detach_adapter(struct device *dev, void *dummy)
+ if (!i2c_dev) /* attach_adapter must have failed */
+ return 0;
+
+- return_i2c_dev(i2c_dev);
+- device_destroy(i2c_dev_class, MKDEV(I2C_MAJOR, adap->nr));
++ put_i2c_dev(i2c_dev, true);
+
+ pr_debug("i2c-dev: adapter [%s] unregistered\n", adap->name);
+ return 0;
+@@ -627,7 +637,7 @@ static int __init i2c_dev_init(void)
+
+ printk(KERN_INFO "i2c /dev entries driver\n");
+
+- res = register_chrdev(I2C_MAJOR, "i2c", &i2cdev_fops);
++ res = register_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS, "i2c");
+ if (res)
+ goto out;
+
+@@ -651,7 +661,7 @@ static int __init i2c_dev_init(void)
+ out_unreg_class:
+ class_destroy(i2c_dev_class);
+ out_unreg_chrdev:
+- unregister_chrdev(I2C_MAJOR, "i2c");
++ unregister_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS);
+ out:
+ printk(KERN_ERR "%s: Driver Initialisation failed\n", __FILE__);
+ return res;
+@@ -662,7 +672,7 @@ static void __exit i2c_dev_exit(void)
+ bus_unregister_notifier(&i2c_bus_type, &i2cdev_notifier);
+ i2c_for_each_dev(NULL, i2cdev_detach_adapter);
+ class_destroy(i2c_dev_class);
+- unregister_chrdev(I2C_MAJOR, "i2c");
++ unregister_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS);
+ }
+
+ MODULE_AUTHOR("Frodo Looijaard <frodol@×××.nl> and "
+diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
+index 7b39440192d6..0ca9506f4654 100644
+--- a/drivers/media/media-device.c
++++ b/drivers/media/media-device.c
+@@ -24,6 +24,7 @@
+ #include <linux/export.h>
+ #include <linux/ioctl.h>
+ #include <linux/media.h>
++#include <linux/slab.h>
+ #include <linux/types.h>
+
+ #include <media/media-device.h>
+@@ -234,7 +235,7 @@ static long media_device_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+ {
+ struct media_devnode *devnode = media_devnode_data(filp);
+- struct media_device *dev = to_media_device(devnode);
++ struct media_device *dev = devnode->media_dev;
+ long ret;
+
+ switch (cmd) {
+@@ -303,7 +304,7 @@ static long media_device_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+ {
+ struct media_devnode *devnode = media_devnode_data(filp);
+- struct media_device *dev = to_media_device(devnode);
++ struct media_device *dev = devnode->media_dev;
+ long ret;
+
+ switch (cmd) {
+@@ -344,7 +345,8 @@ static const struct media_file_operations media_device_fops = {
+ static ssize_t show_model(struct device *cd,
+ struct device_attribute *attr, char *buf)
+ {
+- struct media_device *mdev = to_media_device(to_media_devnode(cd));
++ struct media_devnode *devnode = to_media_devnode(cd);
++ struct media_device *mdev = devnode->media_dev;
+
+ return sprintf(buf, "%.*s\n", (int)sizeof(mdev->model), mdev->model);
+ }
+@@ -372,6 +374,7 @@ static void media_device_release(struct media_devnode *mdev)
+ int __must_check __media_device_register(struct media_device *mdev,
+ struct module *owner)
+ {
++ struct media_devnode *devnode;
+ int ret;
+
+ if (WARN_ON(mdev->dev == NULL || mdev->model[0] == 0))
+@@ -382,17 +385,28 @@ int __must_check __media_device_register(struct media_device *mdev,
+ spin_lock_init(&mdev->lock);
+ mutex_init(&mdev->graph_mutex);
+
++ devnode = kzalloc(sizeof(*devnode), GFP_KERNEL);
++ if (!devnode)
++ return -ENOMEM;
++
+ /* Register the device node. */
+- mdev->devnode.fops = &media_device_fops;
+- mdev->devnode.parent = mdev->dev;
+- mdev->devnode.release = media_device_release;
+- ret = media_devnode_register(&mdev->devnode, owner);
+- if (ret < 0)
++ mdev->devnode = devnode;
++ devnode->fops = &media_device_fops;
++ devnode->parent = mdev->dev;
++ devnode->release = media_device_release;
++ ret = media_devnode_register(mdev, devnode, owner);
++ if (ret < 0) {
++ /* devnode free is handled in media_devnode_*() */
++ mdev->devnode = NULL;
+ return ret;
++ }
+
+- ret = device_create_file(&mdev->devnode.dev, &dev_attr_model);
++ ret = device_create_file(&devnode->dev, &dev_attr_model);
+ if (ret < 0) {
+- media_devnode_unregister(&mdev->devnode);
++ /* devnode free is handled in media_devnode_*() */
++ mdev->devnode = NULL;
++ media_devnode_unregister_prepare(devnode);
++ media_devnode_unregister(devnode);
+ return ret;
+ }
+
+@@ -410,11 +424,16 @@ void media_device_unregister(struct media_device *mdev)
+ struct media_entity *entity;
+ struct media_entity *next;
+
++ /* Clear the devnode register bit to avoid races with media dev open */
++ media_devnode_unregister_prepare(mdev->devnode);
++
+ list_for_each_entry_safe(entity, next, &mdev->entities, list)
+ media_device_unregister_entity(entity);
+
+- device_remove_file(&mdev->devnode.dev, &dev_attr_model);
+- media_devnode_unregister(&mdev->devnode);
++ device_remove_file(&mdev->devnode->dev, &dev_attr_model);
++ media_devnode_unregister(mdev->devnode);
++ /* devnode free is handled in media_devnode_*() */
++ mdev->devnode = NULL;
+ }
+ EXPORT_SYMBOL_GPL(media_device_unregister);
+
+diff --git a/drivers/media/media-devnode.c b/drivers/media/media-devnode.c
+index ebf9626e5ae5..e887120d19aa 100644
+--- a/drivers/media/media-devnode.c
++++ b/drivers/media/media-devnode.c
+@@ -44,6 +44,7 @@
+ #include <linux/uaccess.h>
+
+ #include <media/media-devnode.h>
++#include <media/media-device.h>
+
+ #define MEDIA_NUM_DEVICES 256
+ #define MEDIA_NAME "media"
+@@ -59,21 +60,19 @@ static DECLARE_BITMAP(media_devnode_nums, MEDIA_NUM_DEVICES);
+ /* Called when the last user of the media device exits. */
+ static void media_devnode_release(struct device *cd)
+ {
+- struct media_devnode *mdev = to_media_devnode(cd);
++ struct media_devnode *devnode = to_media_devnode(cd);
+
+ mutex_lock(&media_devnode_lock);
+-
+- /* Delete the cdev on this minor as well */
+- cdev_del(&mdev->cdev);
+-
+ /* Mark device node number as free */
+- clear_bit(mdev->minor, media_devnode_nums);
+-
++ clear_bit(devnode->minor, media_devnode_nums);
+ mutex_unlock(&media_devnode_lock);
+
+ /* Release media_devnode and perform other cleanups as needed. */
+- if (mdev->release)
+- mdev->release(mdev);
++ if (devnode->release)
++ devnode->release(devnode);
++
++ kfree(devnode);
++ pr_debug("%s: Media Devnode Deallocated\n", __func__);
+ }
+
+ static struct bus_type media_bus_type = {
+@@ -83,37 +82,37 @@ static struct bus_type media_bus_type = {
+ static ssize_t media_read(struct file *filp, char __user *buf,
+ size_t sz, loff_t *off)
+ {
+- struct media_devnode *mdev = media_devnode_data(filp);
++ struct media_devnode *devnode = media_devnode_data(filp);
+
+- if (!mdev->fops->read)
++ if (!devnode->fops->read)
+ return -EINVAL;
+- if (!media_devnode_is_registered(mdev))
++ if (!media_devnode_is_registered(devnode))
+ return -EIO;
+- return mdev->fops->read(filp, buf, sz, off);
++ return devnode->fops->read(filp, buf, sz, off);
+ }
+
+ static ssize_t media_write(struct file *filp, const char __user *buf,
+ size_t sz, loff_t *off)
+ {
+- struct media_devnode *mdev = media_devnode_data(filp);
++ struct media_devnode *devnode = media_devnode_data(filp);
+
+- if (!mdev->fops->write)
++ if (!devnode->fops->write)
+ return -EINVAL;
+- if (!media_devnode_is_registered(mdev))
++ if (!media_devnode_is_registered(devnode))
+ return -EIO;
+- return mdev->fops->write(filp, buf, sz, off);
++ return devnode->fops->write(filp, buf, sz, off);
+ }
+
+ static unsigned int media_poll(struct file *filp,
+ struct poll_table_struct *poll)
+ {
+- struct media_devnode *mdev = media_devnode_data(filp);
++ struct media_devnode *devnode = media_devnode_data(filp);
+
+- if (!media_devnode_is_registered(mdev))
++ if (!media_devnode_is_registered(devnode))
+ return POLLERR | POLLHUP;
+- if (!mdev->fops->poll)
++ if (!devnode->fops->poll)
+ return DEFAULT_POLLMASK;
+- return mdev->fops->poll(filp, poll);
++ return devnode->fops->poll(filp, poll);
+ }
+
+ static long
+@@ -121,12 +120,12 @@ __media_ioctl(struct file *filp, unsigned int cmd, unsigned long arg,
+ long (*ioctl_func)(struct file *filp, unsigned int cmd,
+ unsigned long arg))
+ {
+- struct media_devnode *mdev = media_devnode_data(filp);
++ struct media_devnode *devnode = media_devnode_data(filp);
+
+ if (!ioctl_func)
+ return -ENOTTY;
+
+- if (!media_devnode_is_registered(mdev))
++ if (!media_devnode_is_registered(devnode))
+ return -EIO;
+
+ return ioctl_func(filp, cmd, arg);
+@@ -134,9 +133,9 @@ __media_ioctl(struct file *filp, unsigned int cmd, unsigned long arg,
+
+ static long media_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ {
+- struct media_devnode *mdev = media_devnode_data(filp);
++ struct media_devnode *devnode = media_devnode_data(filp);
+
+- return __media_ioctl(filp, cmd, arg, mdev->fops->ioctl);
++ return __media_ioctl(filp, cmd, arg, devnode->fops->ioctl);
+ }
+
+ #ifdef CONFIG_COMPAT
+@@ -144,9 +143,9 @@ static long media_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ static long media_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+ {
+- struct media_devnode *mdev = media_devnode_data(filp);
++ struct media_devnode *devnode = media_devnode_data(filp);
+
+- return __media_ioctl(filp, cmd, arg, mdev->fops->compat_ioctl);
++ return __media_ioctl(filp, cmd, arg, devnode->fops->compat_ioctl);
+ }
+
+ #endif /* CONFIG_COMPAT */
+@@ -154,7 +153,7 @@ static long media_compat_ioctl(struct file *filp, unsigned int cmd,
+ /* Override for the open function */
+ static int media_open(struct inode *inode, struct file *filp)
+ {
+- struct media_devnode *mdev;
++ struct media_devnode *devnode;
+ int ret;
+
+ /* Check if the media device is available. This needs to be done with
+@@ -164,23 +163,24 @@ static int media_open(struct inode *inode, struct file *filp)
+ * a crash.
+ */
+ mutex_lock(&media_devnode_lock);
+- mdev = container_of(inode->i_cdev, struct media_devnode, cdev);
++ devnode = container_of(inode->i_cdev, struct media_devnode, cdev);
+ /* return ENXIO if the media device has been removed
+ already or if it is not registered anymore. */
+- if (!media_devnode_is_registered(mdev)) {
++ if (!media_devnode_is_registered(devnode)) {
+ mutex_unlock(&media_devnode_lock);
+ return -ENXIO;
+ }
+ /* and increase the device refcount */
+- get_device(&mdev->dev);
++ get_device(&devnode->dev);
+ mutex_unlock(&media_devnode_lock);
+
+- filp->private_data = mdev;
++ filp->private_data = devnode;
+
+- if (mdev->fops->open) {
+- ret = mdev->fops->open(filp);
++ if (devnode->fops->open) {
++ ret = devnode->fops->open(filp);
+ if (ret) {
+- put_device(&mdev->dev);
++ put_device(&devnode->dev);
++ filp->private_data = NULL;
+ return ret;
+ }
+ }
+@@ -191,15 +191,18 @@ static int media_open(struct inode *inode, struct file *filp)
+ /* Override for the release function */
+ static int media_release(struct inode *inode, struct file *filp)
+ {
+- struct media_devnode *mdev = media_devnode_data(filp);
++ struct media_devnode *devnode = media_devnode_data(filp);
++
++ if (devnode->fops->release)
++ devnode->fops->release(filp);
+
+- if (mdev->fops->release)
+- mdev->fops->release(filp);
++ filp->private_data = NULL;
+
+ /* decrease the refcount unconditionally since the release()
+ return value is ignored. */
+- put_device(&mdev->dev);
+- filp->private_data = NULL;
++ put_device(&devnode->dev);
++
++ pr_debug("%s: Media Release\n", __func__);
+ return 0;
+ }
+
+@@ -219,7 +222,8 @@ static const struct file_operations media_devnode_fops = {
+
+ /**
+ * media_devnode_register - register a media device node
+- * @mdev: media device node structure we want to register
++ * @media_dev: struct media_device we want to register a device node
++ * @devnode: media device node structure we want to register
+ *
+ * The registration code assigns minor numbers and registers the new device node
+ * with the kernel. An error is returned if no free minor number can be found,
+@@ -231,7 +235,8 @@ static const struct file_operations media_devnode_fops = {
+ * the media_devnode structure is *not* called, so the caller is responsible for
+ * freeing any data.
+ */
+-int __must_check media_devnode_register(struct media_devnode *mdev,
++int __must_check media_devnode_register(struct media_device *mdev,
++ struct media_devnode *devnode,
+ struct module *owner)
+ {
+ int minor;
+@@ -243,68 +248,89 @@ int __must_check media_devnode_register(struct media_devnode *mdev,
+ if (minor == MEDIA_NUM_DEVICES) {
+ mutex_unlock(&media_devnode_lock);
+ pr_err("could not get a free minor\n");
++ kfree(devnode);
+ return -ENFILE;
+ }
+
+ set_bit(minor, media_devnode_nums);
+ mutex_unlock(&media_devnode_lock);
+
+- mdev->minor = minor;
++ devnode->minor = minor;
++ devnode->media_dev = mdev;
++
++ /* Part 1: Initialize dev now to use dev.kobj for cdev.kobj.parent */
++ devnode->dev.bus = &media_bus_type;
++ devnode->dev.devt = MKDEV(MAJOR(media_dev_t), devnode->minor);
++ devnode->dev.release = media_devnode_release;
++ if (devnode->parent)
++ devnode->dev.parent = devnode->parent;
++ dev_set_name(&devnode->dev, "media%d", devnode->minor);
++ device_initialize(&devnode->dev);
+
+ /* Part 2: Initialize and register the character device */
+- cdev_init(&mdev->cdev, &media_devnode_fops);
+- mdev->cdev.owner = owner;
++ cdev_init(&devnode->cdev, &media_devnode_fops);
++ devnode->cdev.owner = owner;
++ devnode->cdev.kobj.parent = &devnode->dev.kobj;
+
+- ret = cdev_add(&mdev->cdev, MKDEV(MAJOR(media_dev_t), mdev->minor), 1);
++ ret = cdev_add(&devnode->cdev, MKDEV(MAJOR(media_dev_t), devnode->minor), 1);
+ if (ret < 0) {
+ pr_err("%s: cdev_add failed\n", __func__);
+- goto error;
++ goto cdev_add_error;
+ }
+
+- /* Part 3: Register the media device */
+- mdev->dev.bus = &media_bus_type;
+- mdev->dev.devt = MKDEV(MAJOR(media_dev_t), mdev->minor);
+- mdev->dev.release = media_devnode_release;
+- if (mdev->parent)
+- mdev->dev.parent = mdev->parent;
+- dev_set_name(&mdev->dev, "media%d", mdev->minor);
+- ret = device_register(&mdev->dev);
++ /* Part 3: Add the media device */
++ ret = device_add(&devnode->dev);
+ if (ret < 0) {
+- pr_err("%s: device_register failed\n", __func__);
+- goto error;
++ pr_err("%s: device_add failed\n", __func__);
++ goto device_add_error;
+ }
+
+ /* Part 4: Activate this minor. The char device can now be used. */
+- set_bit(MEDIA_FLAG_REGISTERED, &mdev->flags);
++ set_bit(MEDIA_FLAG_REGISTERED, &devnode->flags);
+
+ return 0;
+
+-error:
+- cdev_del(&mdev->cdev);
+- clear_bit(mdev->minor, media_devnode_nums);
++device_add_error:
++ cdev_del(&devnode->cdev);
++cdev_add_error:
++ mutex_lock(&media_devnode_lock);
++ clear_bit(devnode->minor, media_devnode_nums);
++ devnode->media_dev = NULL;
++ mutex_unlock(&media_devnode_lock);
++
++ put_device(&devnode->dev);
+ return ret;
+ }
+
++void media_devnode_unregister_prepare(struct media_devnode *devnode)
++{
++ /* Check if devnode was ever registered at all */
++ if (!media_devnode_is_registered(devnode))
++ return;
++
++ mutex_lock(&media_devnode_lock);
++ clear_bit(MEDIA_FLAG_REGISTERED, &devnode->flags);
++ mutex_unlock(&media_devnode_lock);
++}
++
+ /**
+ * media_devnode_unregister - unregister a media device node
+- * @mdev: the device node to unregister
++ * @devnode: the device node to unregister
+ *
+ * This unregisters the passed device. Future open calls will be met with
+ * errors.
+ *
+- * This function can safely be called if the device node has never been
+- * registered or has already been unregistered.
++ * Should be called after media_devnode_unregister_prepare()
+ */
+-void media_devnode_unregister(struct media_devnode *mdev)
++void media_devnode_unregister(struct media_devnode *devnode)
+ {
+- /* Check if mdev was ever registered at all */
+- if (!media_devnode_is_registered(mdev))
+- return;
+-
+ mutex_lock(&media_devnode_lock);
+- clear_bit(MEDIA_FLAG_REGISTERED, &mdev->flags);
++ /* Delete the cdev on this minor as well */
++ cdev_del(&devnode->cdev);
+ mutex_unlock(&media_devnode_lock);
+- device_unregister(&mdev->dev);
++ device_del(&devnode->dev);
++ devnode->media_dev = NULL;
++ put_device(&devnode->dev);
+ }
+
+ /*
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index 9cd0268b2767..f353ab569b8e 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -1800,7 +1800,7 @@ static void uvc_delete(struct uvc_device *dev)
+ if (dev->vdev.dev)
+ v4l2_device_unregister(&dev->vdev);
+ #ifdef CONFIG_MEDIA_CONTROLLER
+- if (media_devnode_is_registered(&dev->mdev.devnode))
++ if (media_devnode_is_registered(dev->mdev.devnode))
+ media_device_unregister(&dev->mdev);
+ #endif
+
+diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
+index df268365e04e..c8e3995b8cb7 100644
+--- a/drivers/misc/mei/client.c
++++ b/drivers/misc/mei/client.c
+@@ -276,6 +276,7 @@ void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
+ down_write(&dev->me_clients_rwsem);
+ me_cl = __mei_me_cl_by_uuid(dev, uuid);
+ __mei_me_cl_del(dev, me_cl);
++ mei_me_cl_put(me_cl);
+ up_write(&dev->me_clients_rwsem);
+ }
+
+@@ -297,6 +298,7 @@ void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id)
+ down_write(&dev->me_clients_rwsem);
+ me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id);
+ __mei_me_cl_del(dev, me_cl);
++ mei_me_cl_put(me_cl);
+ up_write(&dev->me_clients_rwsem);
+ }
+
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index 9404f38d9d0d..2cf5c581c7e0 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -3296,7 +3296,7 @@ void igb_configure_tx_ring(struct igb_adapter *adapter,
+ tdba & 0x00000000ffffffffULL);
+ wr32(E1000_TDBAH(reg_idx), tdba >> 32);
+
+- ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
++ ring->tail = adapter->io_addr + E1000_TDT(reg_idx);
+ wr32(E1000_TDH(reg_idx), 0);
+ writel(0, ring->tail);
+
+@@ -3652,7 +3652,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
+ ring->count * sizeof(union e1000_adv_rx_desc));
+
+ /* initialize head and tail */
+- ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
++ ring->tail = adapter->io_addr + E1000_RDT(reg_idx);
+ wr32(E1000_RDH(reg_idx), 0);
+ writel(0, ring->tail);
+
+diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
+index 957234272ef7..727eaf203463 100644
+--- a/drivers/nvdimm/btt.c
++++ b/drivers/nvdimm/btt.c
+@@ -443,9 +443,9 @@ static int btt_log_init(struct arena_info *arena)
+
+ static int btt_freelist_init(struct arena_info *arena)
+ {
+- int old, new, ret;
++ int new, ret;
+ u32 i, map_entry;
+- struct log_entry log_new, log_old;
++ struct log_entry log_new;
+
+ arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry),
+ GFP_KERNEL);
+@@ -453,10 +453,6 @@ static int btt_freelist_init(struct arena_info *arena)
+ return -ENOMEM;
+
+ for (i = 0; i < arena->nfree; i++) {
+- old = btt_log_read(arena, i, &log_old, LOG_OLD_ENT);
+- if (old < 0)
+- return old;
+-
+ new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT);
+ if (new < 0)
+ return new;
+diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
+index f5585740a765..95121bff2d3e 100644
+--- a/drivers/platform/x86/alienware-wmi.c
++++ b/drivers/platform/x86/alienware-wmi.c
+@@ -449,23 +449,22 @@ static acpi_status alienware_hdmi_command(struct hdmi_args *in_args,
+
+ input.length = (acpi_size) sizeof(*in_args);
+ input.pointer = in_args;
+- if (out_data != NULL) {
++ if (out_data) {
+ output.length = ACPI_ALLOCATE_BUFFER;
+ output.pointer = NULL;
+ status = wmi_evaluate_method(WMAX_CONTROL_GUID, 1,
+ command, &input, &output);
+- } else
++ if (ACPI_SUCCESS(status)) {
++ obj = (union acpi_object *)output.pointer;
++ if (obj && obj->type == ACPI_TYPE_INTEGER)
++ *out_data = (u32)obj->integer.value;
++ }
++ kfree(output.pointer);
++ } else {
+ status = wmi_evaluate_method(WMAX_CONTROL_GUID, 1,
+ command, &input, NULL);
+-
+- if (ACPI_SUCCESS(status) && out_data != NULL) {
+- obj = (union acpi_object *)output.pointer;
+- if (obj && obj->type == ACPI_TYPE_INTEGER)
+- *out_data = (u32) obj->integer.value;
+ }
+- kfree(output.pointer);
+ return status;
+-
+ }
+
+ static ssize_t show_hdmi_cable(struct device *dev,
+diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
+index cccf250cd1e3..ee64c9512a3a 100644
+--- a/drivers/platform/x86/asus-nb-wmi.c
++++ b/drivers/platform/x86/asus-nb-wmi.c
+@@ -551,9 +551,33 @@ static struct asus_wmi_driver asus_nb_wmi_driver = {
+ .detect_quirks = asus_nb_wmi_quirks,
+ };
+
++static const struct dmi_system_id asus_nb_wmi_blacklist[] __initconst = {
++ {
++ /*
++ * asus-nb-wm adds no functionality. The T100TA has a detachable
++ * USB kbd, so no hotkeys and it has no WMI rfkill; and loading
++ * asus-nb-wm causes the camera LED to turn and _stay_ on.
++ */
++ .matches = {
++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TA"),
++ },
++ },
++ {
++ /* The Asus T200TA has the same issue as the T100TA */
++ .matches = {
++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T200TA"),
++ },
++ },
++ {} /* Terminating entry */
++};
+
+ static int __init asus_nb_wmi_init(void)
+ {
++ if (dmi_check_system(asus_nb_wmi_blacklist))
++ return -ENODEV;
++
+ return asus_wmi_register_driver(&asus_nb_wmi_driver);
+ }
+
+diff --git a/drivers/staging/iio/accel/sca3000_ring.c b/drivers/staging/iio/accel/sca3000_ring.c
+index 20b878d35ea2..fc8b6f179ec6 100644
+--- a/drivers/staging/iio/accel/sca3000_ring.c
++++ b/drivers/staging/iio/accel/sca3000_ring.c
+@@ -56,7 +56,7 @@ static int sca3000_read_data(struct sca3000_state *st,
+ st->tx[0] = SCA3000_READ_REG(reg_address_high);
+ ret = spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer));
+ if (ret) {
+- dev_err(get_device(&st->us->dev), "problem reading register");
++ dev_err(&st->us->dev, "problem reading register");
+ goto error_free_rx;
+ }
+
+diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
+index 8eb7179da342..4a12a3ea3f25 100644
+--- a/drivers/staging/iio/resolver/ad2s1210.c
++++ b/drivers/staging/iio/resolver/ad2s1210.c
+@@ -125,17 +125,24 @@ static int ad2s1210_config_write(struct ad2s1210_state *st, u8 data)
+ static int ad2s1210_config_read(struct ad2s1210_state *st,
+ unsigned char address)
+ {
+- struct spi_transfer xfer = {
+- .len = 2,
+- .rx_buf = st->rx,
+- .tx_buf = st->tx,
++ struct spi_transfer xfers[] = {
++ {
++ .len = 1,
++ .rx_buf = &st->rx[0],
++ .tx_buf = &st->tx[0],
++ .cs_change = 1,
++ }, {
++ .len = 1,
++ .rx_buf = &st->rx[1],
++ .tx_buf = &st->tx[1],
++ },
+ };
+ int ret = 0;
+
+ ad2s1210_set_mode(MOD_CONFIG, st);
+ st->tx[0] = address | AD2S1210_MSB_IS_HIGH;
+ st->tx[1] = AD2S1210_REG_FAULT;
+- ret = spi_sync_transfer(st->sdev, &xfer, 1);
++ ret = spi_sync_transfer(st->sdev, xfers, 2);
+ if (ret < 0)
+ return ret;
+ st->old_data = true;
+diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
+index 747343c61398..f083ecfddd1b 100644
+--- a/drivers/usb/core/message.c
++++ b/drivers/usb/core/message.c
+@@ -1080,11 +1080,11 @@ void usb_disable_endpoint(struct usb_device *dev, unsigned int epaddr,
+
+ if (usb_endpoint_out(epaddr)) {
+ ep = dev->ep_out[epnum];
+- if (reset_hardware)
++ if (reset_hardware && epnum != 0)
+ dev->ep_out[epnum] = NULL;
+ } else {
+ ep = dev->ep_in[epnum];
+- if (reset_hardware)
++ if (reset_hardware && epnum != 0)
+ dev->ep_in[epnum] = NULL;
+ }
+ if (ep) {
+diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
+index efdf81ea3b5f..3d0497421e62 100644
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -3293,6 +3293,7 @@ retry:
+ WARN_ON(1);
+ tsession = NULL;
+ target = -1;
++ mutex_lock(&session->s_mutex);
+ }
+ goto retry;
+
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index 53679716baca..18b9213ce0bd 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -139,31 +139,26 @@ static __le32 ext4_xattr_block_csum(struct inode *inode,
+ }
+
+ static int ext4_xattr_block_csum_verify(struct inode *inode,
+- sector_t block_nr,
+- struct ext4_xattr_header *hdr)
++ struct buffer_head *bh)
+ {
+- if (ext4_has_metadata_csum(inode->i_sb) &&
+- (hdr->h_checksum != ext4_xattr_block_csum(inode, block_nr, hdr)))
+- return 0;
+- return 1;
+-}
+-
+-static void ext4_xattr_block_csum_set(struct inode *inode,
+- sector_t block_nr,
+- struct ext4_xattr_header *hdr)
+-{
+- if (!ext4_has_metadata_csum(inode->i_sb))
+- return;
++ struct ext4_xattr_header *hdr = BHDR(bh);
++ int ret = 1;
+
+- hdr->h_checksum = ext4_xattr_block_csum(inode, block_nr, hdr);
++ if (ext4_has_metadata_csum(inode->i_sb)) {
++ lock_buffer(bh);
++ ret = (hdr->h_checksum == ext4_xattr_block_csum(inode,
++ bh->b_blocknr, hdr));
++ unlock_buffer(bh);
++ }
++ return ret;
+ }
+
+-static inline int ext4_handle_dirty_xattr_block(handle_t *handle,
+- struct inode *inode,
+- struct buffer_head *bh)
++static void ext4_xattr_block_csum_set(struct inode *inode,
++ struct buffer_head *bh)
+ {
+- ext4_xattr_block_csum_set(inode, bh->b_blocknr, BHDR(bh));
+- return ext4_handle_dirty_metadata(handle, inode, bh);
++ if (ext4_has_metadata_csum(inode->i_sb))
++ BHDR(bh)->h_checksum = ext4_xattr_block_csum(inode,
++ bh->b_blocknr, BHDR(bh));
+ }
+
+ static inline const struct xattr_handler *
+@@ -226,7 +221,7 @@ ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
+ if (buffer_verified(bh))
+ return 0;
+
+- if (!ext4_xattr_block_csum_verify(inode, bh->b_blocknr, BHDR(bh)))
++ if (!ext4_xattr_block_csum_verify(inode, bh))
+ return -EFSBADCRC;
+ error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size,
+ bh->b_data);
+@@ -590,23 +585,23 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
+ le32_add_cpu(&BHDR(bh)->h_refcount, -1);
+ if (ce)
+ mb_cache_entry_release(ce);
++
++ ext4_xattr_block_csum_set(inode, bh);
+ /*
+ * Beware of this ugliness: Releasing of xattr block references
+ * from different inodes can race and so we have to protect
+ * from a race where someone else frees the block (and releases
+ * its journal_head) before we are done dirtying the buffer. In
+ * nojournal mode this race is harmless and we actually cannot
+- * call ext4_handle_dirty_xattr_block() with locked buffer as
++ * call ext4_handle_dirty_metadata() with locked buffer as
+ * that function can call sync_dirty_buffer() so for that case
+ * we handle the dirtying after unlocking the buffer.
+ */
+ if (ext4_handle_valid(handle))
+- error = ext4_handle_dirty_xattr_block(handle, inode,
+- bh);
++ error = ext4_handle_dirty_metadata(handle, inode, bh);
+ unlock_buffer(bh);
+ if (!ext4_handle_valid(handle))
+- error = ext4_handle_dirty_xattr_block(handle, inode,
+- bh);
++ error = ext4_handle_dirty_metadata(handle, inode, bh);
+ if (IS_SYNC(inode))
+ ext4_handle_sync(handle);
+ dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1));
+@@ -837,13 +832,14 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
+ ext4_xattr_rehash(header(s->base),
+ s->here);
+ }
++ ext4_xattr_block_csum_set(inode, bs->bh);
+ unlock_buffer(bs->bh);
+ if (error == -EFSCORRUPTED)
+ goto bad_block;
+ if (!error)
+- error = ext4_handle_dirty_xattr_block(handle,
+- inode,
+- bs->bh);
++ error = ext4_handle_dirty_metadata(handle,
++ inode,
++ bs->bh);
+ if (error)
+ goto cleanup;
+ goto inserted;
+@@ -912,10 +908,11 @@ inserted:
+ le32_add_cpu(&BHDR(new_bh)->h_refcount, 1);
+ ea_bdebug(new_bh, "reusing; refcount now=%d",
+ le32_to_cpu(BHDR(new_bh)->h_refcount));
++ ext4_xattr_block_csum_set(inode, new_bh);
+ unlock_buffer(new_bh);
+- error = ext4_handle_dirty_xattr_block(handle,
+- inode,
+- new_bh);
++ error = ext4_handle_dirty_metadata(handle,
++ inode,
++ new_bh);
+ if (error)
+ goto cleanup_dquot;
+ }
+@@ -965,11 +962,12 @@ getblk_failed:
+ goto getblk_failed;
+ }
+ memcpy(new_bh->b_data, s->base, new_bh->b_size);
++ ext4_xattr_block_csum_set(inode, new_bh);
+ set_buffer_uptodate(new_bh);
+ unlock_buffer(new_bh);
+ ext4_xattr_cache_insert(ext4_mb_cache, new_bh);
+- error = ext4_handle_dirty_xattr_block(handle,
+- inode, new_bh);
++ error = ext4_handle_dirty_metadata(handle, inode,
++ new_bh);
+ if (error)
+ goto cleanup;
+ }
+diff --git a/fs/file.c b/fs/file.c
+index 7e9eb65a2912..090015401c55 100644
+--- a/fs/file.c
++++ b/fs/file.c
+@@ -88,7 +88,7 @@ static void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
+ */
+ static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
+ {
+- unsigned int cpy, set;
++ size_t cpy, set;
+
+ BUG_ON(nfdt->max_fds < ofdt->max_fds);
+
+diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
+index f80ffccb0316..1eb737c466dd 100644
+--- a/fs/gfs2/glock.c
++++ b/fs/gfs2/glock.c
+@@ -541,9 +541,6 @@ __acquires(&gl->gl_lockref.lock)
+ goto out_unlock;
+ if (nonblock)
+ goto out_sched;
+- smp_mb();
+- if (atomic_read(&gl->gl_revokes) != 0)
+- goto out_sched;
+ set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
+ GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
+ gl->gl_target = gl->gl_demote_state;
+diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
+index bb3a4bb35183..f0a3fc723ae4 100644
+--- a/include/linux/cpumask.h
++++ b/include/linux/cpumask.h
+@@ -160,6 +160,8 @@ static inline unsigned int cpumask_local_spread(unsigned int i, int node)
+ for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
+ #define for_each_cpu_not(cpu, mask) \
+ for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
++#define for_each_cpu_wrap(cpu, mask, start) \
++ for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)(start))
+ #define for_each_cpu_and(cpu, mask, and) \
+ for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and)
+ #else
+@@ -232,6 +234,23 @@ unsigned int cpumask_local_spread(unsigned int i, int node);
+ (cpu) = cpumask_next_zero((cpu), (mask)), \
+ (cpu) < nr_cpu_ids;)
+
++extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);
++
++/**
++ * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location
++ * @cpu: the (optionally unsigned) integer iterator
++ * @mask: the cpumask pointer
++ * @start: the start location
++ *
++ * The implementation does not assume any bit in @mask is set (including @start).
++ *
++ * After the loop, cpu is >= nr_cpu_ids.
++ */
++#define for_each_cpu_wrap(cpu, mask, start) \
++ for ((cpu) = cpumask_next_wrap((start)-1, (mask), (start), false); \
++ (cpu) < nr_cpumask_bits; \
++ (cpu) = cpumask_next_wrap((cpu), (mask), (start), true))
++
+ /**
+ * for_each_cpu_and - iterate over every cpu in both masks
+ * @cpu: the (optionally unsigned) integer iterator
+diff --git a/include/linux/net.h b/include/linux/net.h
+index c00b8d182226..6de18ead3dfe 100644
+--- a/include/linux/net.h
++++ b/include/linux/net.h
+@@ -291,6 +291,9 @@ int kernel_sendpage(struct socket *sock, struct page *page, int offset,
+ int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg);
+ int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how);
+
++/* Routine returns the IP overhead imposed by a (caller-protected) socket. */
++u32 kernel_sock_ip_overhead(struct sock *sk);
++
+ #define MODULE_ALIAS_NETPROTO(proto) \
+ MODULE_ALIAS("net-pf-" __stringify(proto))
+
+diff --git a/include/linux/padata.h b/include/linux/padata.h
+index 438694650471..547a8d1e4a3b 100644
+--- a/include/linux/padata.h
++++ b/include/linux/padata.h
+@@ -24,7 +24,6 @@
+ #include <linux/workqueue.h>
+ #include <linux/spinlock.h>
+ #include <linux/list.h>
+-#include <linux/timer.h>
+ #include <linux/notifier.h>
+ #include <linux/kobject.h>
+
+@@ -37,6 +36,7 @@
+ * @list: List entry, to attach to the padata lists.
+ * @pd: Pointer to the internal control structure.
+ * @cb_cpu: Callback cpu for serializatioon.
++ * @cpu: Cpu for parallelization.
+ * @seq_nr: Sequence number of the parallelized data object.
+ * @info: Used to pass information from the parallel to the serial function.
+ * @parallel: Parallel execution function.
+@@ -46,6 +46,7 @@ struct padata_priv {
+ struct list_head list;
+ struct parallel_data *pd;
+ int cb_cpu;
++ int cpu;
+ int info;
+ void (*parallel)(struct padata_priv *padata);
+ void (*serial)(struct padata_priv *padata);
+@@ -83,7 +84,6 @@ struct padata_serial_queue {
+ * @serial: List to wait for serialization after reordering.
+ * @pwork: work struct for parallelization.
+ * @swork: work struct for serialization.
+- * @pd: Backpointer to the internal control structure.
+ * @work: work struct for parallelization.
+ * @num_obj: Number of objects that are processed by this cpu.
+ * @cpu_index: Index of the cpu.
+@@ -91,7 +91,6 @@ struct padata_serial_queue {
+ struct padata_parallel_queue {
+ struct padata_list parallel;
+ struct padata_list reorder;
+- struct parallel_data *pd;
+ struct work_struct work;
+ atomic_t num_obj;
+ int cpu_index;
+@@ -118,10 +117,10 @@ struct padata_cpumask {
+ * @reorder_objects: Number of objects waiting in the reorder queues.
+ * @refcnt: Number of objects holding a reference on this parallel_data.
+ * @max_seq_nr: Maximal used sequence number.
++ * @cpu: Next CPU to be processed.
+ * @cpumask: The cpumasks in use for parallel and serial workers.
++ * @reorder_work: work struct for reordering.
+ * @lock: Reorder lock.
+- * @processed: Number of already processed objects.
+- * @timer: Reorder timer.
+ */
+ struct parallel_data {
+ struct padata_instance *pinst;
+@@ -130,10 +129,10 @@ struct parallel_data {
+ atomic_t reorder_objects;
+ atomic_t refcnt;
+ atomic_t seq_nr;
++ int cpu;
+ struct padata_cpumask cpumask;
++ struct work_struct reorder_work;
+ spinlock_t lock ____cacheline_aligned;
+- unsigned int processed;
+- struct timer_list timer;
+ };
+
+ /**
+diff --git a/include/media/media-device.h b/include/media/media-device.h
+index 6e6db78f1ee2..00bbd679864a 100644
+--- a/include/media/media-device.h
++++ b/include/media/media-device.h
+@@ -60,7 +60,7 @@ struct device;
+ struct media_device {
+ /* dev->driver_data points to this struct. */
+ struct device *dev;
+- struct media_devnode devnode;
++ struct media_devnode *devnode;
+
+ char model[32];
+ char serial[40];
+@@ -84,9 +84,6 @@ struct media_device {
+ #define MEDIA_DEV_NOTIFY_PRE_LINK_CH 0
+ #define MEDIA_DEV_NOTIFY_POST_LINK_CH 1
+
+-/* media_devnode to media_device */
+-#define to_media_device(node) container_of(node, struct media_device, devnode)
+-
+ int __must_check __media_device_register(struct media_device *mdev,
+ struct module *owner);
+ #define media_device_register(mdev) __media_device_register(mdev, THIS_MODULE)
+diff --git a/include/media/media-devnode.h b/include/media/media-devnode.h
+index 17ddae32060d..d5ff95bf2d4b 100644
+--- a/include/media/media-devnode.h
++++ b/include/media/media-devnode.h
+@@ -33,6 +33,8 @@
+ #include <linux/device.h>
+ #include <linux/cdev.h>
+
++struct media_device;
++
+ /*
+ * Flag to mark the media_devnode struct as registered. Drivers must not touch
+ * this flag directly, it will be set and cleared by media_devnode_register and
+@@ -67,6 +69,8 @@ struct media_file_operations {
+ * before registering the node.
+ */
+ struct media_devnode {
++ struct media_device *media_dev;
++
+ /* device ops */
+ const struct media_file_operations *fops;
+
+@@ -80,24 +84,42 @@ struct media_devnode {
+ unsigned long flags; /* Use bitops to access flags */
+
+ /* callbacks */
+- void (*release)(struct media_devnode *mdev);
++ void (*release)(struct media_devnode *devnode);
+ };
+
+ /* dev to media_devnode */
+ #define to_media_devnode(cd) container_of(cd, struct media_devnode, dev)
+
+-int __must_check media_devnode_register(struct media_devnode *mdev,
++int __must_check media_devnode_register(struct media_device *mdev,
++ struct media_devnode *devnode,
+ struct module *owner);
+-void media_devnode_unregister(struct media_devnode *mdev);
++
++/**
++ * media_devnode_unregister_prepare - clear the media device node register bit
++ * @devnode: the device node to prepare for unregister
++ *
++ * This clears the passed device register bit. Future open calls will be met
++ * with errors. Should be called before media_devnode_unregister() to avoid
++ * races with unregister and device file open calls.
++ *
++ * This function can safely be called if the device node has never been
++ * registered or has already been unregistered.
++ */
++void media_devnode_unregister_prepare(struct media_devnode *devnode);
++
++void media_devnode_unregister(struct media_devnode *devnode);
+
+ static inline struct media_devnode *media_devnode_data(struct file *filp)
+ {
+ return filp->private_data;
+ }
+
+-static inline int media_devnode_is_registered(struct media_devnode *mdev)
++static inline int media_devnode_is_registered(struct media_devnode *devnode)
+ {
+- return test_bit(MEDIA_FLAG_REGISTERED, &mdev->flags);
++ if (!devnode)
++ return false;
++
++ return test_bit(MEDIA_FLAG_REGISTERED, &devnode->flags);
+ }
+
+ #endif /* _MEDIA_DEVNODE_H */
+diff --git a/include/net/ipv6.h b/include/net/ipv6.h
+index 6258264a0bf7..94880f07bc06 100644
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -915,6 +915,8 @@ int compat_ipv6_setsockopt(struct sock *sk, int level, int optname,
+ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, int __user *optlen);
+
++int __ip6_datagram_connect(struct sock *sk, struct sockaddr *addr,
++ int addr_len);
+ int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len);
+ int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *addr,
+ int addr_len);
+diff --git a/include/uapi/linux/if_pppol2tp.h b/include/uapi/linux/if_pppol2tp.h
+index 163e8adac2d6..de246e9f4974 100644
+--- a/include/uapi/linux/if_pppol2tp.h
++++ b/include/uapi/linux/if_pppol2tp.h
+@@ -17,6 +17,7 @@
+
+ #include <linux/types.h>
+
++#include <linux/l2tp.h>
+
+ /* Structure used to connect() the socket to a particular tunnel UDP
+ * socket over IPv4.
+@@ -89,14 +90,12 @@ enum {
+ PPPOL2TP_SO_REORDERTO = 5,
+ };
+
+-/* Debug message categories for the DEBUG socket option */
++/* Debug message categories for the DEBUG socket option (deprecated) */
+ enum {
+- PPPOL2TP_MSG_DEBUG = (1 << 0), /* verbose debug (if
+- * compiled in) */
+- PPPOL2TP_MSG_CONTROL = (1 << 1), /* userspace - kernel
+- * interface */
+- PPPOL2TP_MSG_SEQ = (1 << 2), /* sequence numbers */
+- PPPOL2TP_MSG_DATA = (1 << 3), /* data packets */
++ PPPOL2TP_MSG_DEBUG = L2TP_MSG_DEBUG,
++ PPPOL2TP_MSG_CONTROL = L2TP_MSG_CONTROL,
++ PPPOL2TP_MSG_SEQ = L2TP_MSG_SEQ,
++ PPPOL2TP_MSG_DATA = L2TP_MSG_DATA,
+ };
+
+
+diff --git a/include/uapi/linux/l2tp.h b/include/uapi/linux/l2tp.h
+index 347ef22a964e..dedfb2b1832a 100644
+--- a/include/uapi/linux/l2tp.h
++++ b/include/uapi/linux/l2tp.h
+@@ -108,7 +108,7 @@ enum {
+ L2TP_ATTR_VLAN_ID, /* u16 */
+ L2TP_ATTR_COOKIE, /* 0, 4 or 8 bytes */
+ L2TP_ATTR_PEER_COOKIE, /* 0, 4 or 8 bytes */
+- L2TP_ATTR_DEBUG, /* u32 */
++ L2TP_ATTR_DEBUG, /* u32, enum l2tp_debug_flags */
+ L2TP_ATTR_RECV_SEQ, /* u8 */
+ L2TP_ATTR_SEND_SEQ, /* u8 */
+ L2TP_ATTR_LNS_MODE, /* u8 */
+@@ -173,6 +173,21 @@ enum l2tp_seqmode {
+ L2TP_SEQ_ALL = 2,
+ };
+
++/**
++ * enum l2tp_debug_flags - debug message categories for L2TP tunnels/sessions
++ *
++ * @L2TP_MSG_DEBUG: verbose debug (if compiled in)
++ * @L2TP_MSG_CONTROL: userspace - kernel interface
++ * @L2TP_MSG_SEQ: sequence numbers
++ * @L2TP_MSG_DATA: data packets
++ */
++enum l2tp_debug_flags {
++ L2TP_MSG_DEBUG = (1 << 0),
++ L2TP_MSG_CONTROL = (1 << 1),
++ L2TP_MSG_SEQ = (1 << 2),
++ L2TP_MSG_DATA = (1 << 3),
++};
++
+ /*
+ * NETLINK_GENERIC related info
+ */
+diff --git a/kernel/padata.c b/kernel/padata.c
+index ae036af3f012..c50975f43b34 100644
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -65,15 +65,11 @@ static int padata_cpu_hash(struct parallel_data *pd)
+ static void padata_parallel_worker(struct work_struct *parallel_work)
+ {
+ struct padata_parallel_queue *pqueue;
+- struct parallel_data *pd;
+- struct padata_instance *pinst;
+ LIST_HEAD(local_list);
+
+ local_bh_disable();
+ pqueue = container_of(parallel_work,
+ struct padata_parallel_queue, work);
+- pd = pqueue->pd;
+- pinst = pd->pinst;
+
+ spin_lock(&pqueue->parallel.lock);
+ list_replace_init(&pqueue->parallel.list, &local_list);
+@@ -136,6 +132,7 @@ int padata_do_parallel(struct padata_instance *pinst,
+ padata->cb_cpu = cb_cpu;
+
+ target_cpu = padata_cpu_hash(pd);
++ padata->cpu = target_cpu;
+ queue = per_cpu_ptr(pd->pqueue, target_cpu);
+
+ spin_lock(&queue->parallel.lock);
+@@ -159,8 +156,6 @@ EXPORT_SYMBOL(padata_do_parallel);
+ * A pointer to the control struct of the next object that needs
+ * serialization, if present in one of the percpu reorder queues.
+ *
+- * NULL, if all percpu reorder queues are empty.
+- *
+ * -EINPROGRESS, if the next object that needs serialization will
+ * be parallel processed by another cpu and is not yet present in
+ * the cpu's reorder queue.
+@@ -170,25 +165,12 @@ EXPORT_SYMBOL(padata_do_parallel);
+ */
+ static struct padata_priv *padata_get_next(struct parallel_data *pd)
+ {
+- int cpu, num_cpus;
+- unsigned int next_nr, next_index;
+ struct padata_parallel_queue *next_queue;
+ struct padata_priv *padata;
+ struct padata_list *reorder;
++ int cpu = pd->cpu;
+
+- num_cpus = cpumask_weight(pd->cpumask.pcpu);
+-
+- /*
+- * Calculate the percpu reorder queue and the sequence
+- * number of the next object.
+- */
+- next_nr = pd->processed;
+- next_index = next_nr % num_cpus;
+- cpu = padata_index_to_cpu(pd, next_index);
+ next_queue = per_cpu_ptr(pd->pqueue, cpu);
+-
+- padata = NULL;
+-
+ reorder = &next_queue->reorder;
+
+ spin_lock(&reorder->lock);
+@@ -199,7 +181,8 @@ static struct padata_priv *padata_get_next(struct parallel_data *pd)
+ list_del_init(&padata->list);
+ atomic_dec(&pd->reorder_objects);
+
+- pd->processed++;
++ pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1,
++ false);
+
+ spin_unlock(&reorder->lock);
+ goto out;
+@@ -222,6 +205,7 @@ static void padata_reorder(struct parallel_data *pd)
+ struct padata_priv *padata;
+ struct padata_serial_queue *squeue;
+ struct padata_instance *pinst = pd->pinst;
++ struct padata_parallel_queue *next_queue;
+
+ /*
+ * We need to ensure that only one cpu can work on dequeueing of
+@@ -240,12 +224,11 @@ static void padata_reorder(struct parallel_data *pd)
+ padata = padata_get_next(pd);
+
+ /*
+- * All reorder queues are empty, or the next object that needs
+- * serialization is parallel processed by another cpu and is
+- * still on it's way to the cpu's reorder queue, nothing to
+- * do for now.
++ * If the next object that needs serialization is parallel
++ * processed by another cpu and is still on its way to the
1481 ++ * cpu's reorder queue, nothing to do for now.
1482 + */
1483 +- if (!padata || PTR_ERR(padata) == -EINPROGRESS)
1484 ++ if (PTR_ERR(padata) == -EINPROGRESS)
1485 + break;
1486 +
1487 + /*
1488 +@@ -254,7 +237,6 @@ static void padata_reorder(struct parallel_data *pd)
1489 + * so exit immediately.
1490 + */
1491 + if (PTR_ERR(padata) == -ENODATA) {
1492 +- del_timer(&pd->timer);
1493 + spin_unlock_bh(&pd->lock);
1494 + return;
1495 + }
1496 +@@ -273,28 +255,27 @@ static void padata_reorder(struct parallel_data *pd)
1497 +
1498 + /*
1499 + * The next object that needs serialization might have arrived to
1500 +- * the reorder queues in the meantime, we will be called again
1501 +- * from the timer function if no one else cares for it.
1502 ++ * the reorder queues in the meantime.
1503 + *
1504 +- * Ensure reorder_objects is read after pd->lock is dropped so we see
1505 +- * an increment from another task in padata_do_serial. Pairs with
1506 ++ * Ensure reorder queue is read after pd->lock is dropped so we see
1507 ++ * new objects from another task in padata_do_serial. Pairs with
1508 + * smp_mb__after_atomic in padata_do_serial.
1509 + */
1510 + smp_mb();
1511 +- if (atomic_read(&pd->reorder_objects)
1512 +- && !(pinst->flags & PADATA_RESET))
1513 +- mod_timer(&pd->timer, jiffies + HZ);
1514 +- else
1515 +- del_timer(&pd->timer);
1516 +
1517 +- return;
1518 ++ next_queue = per_cpu_ptr(pd->pqueue, pd->cpu);
1519 ++ if (!list_empty(&next_queue->reorder.list))
1520 ++ queue_work(pinst->wq, &pd->reorder_work);
1521 + }
1522 +
1523 +-static void padata_reorder_timer(unsigned long arg)
1524 ++static void invoke_padata_reorder(struct work_struct *work)
1525 + {
1526 +- struct parallel_data *pd = (struct parallel_data *)arg;
1527 ++ struct parallel_data *pd;
1528 +
1529 ++ local_bh_disable();
1530 ++ pd = container_of(work, struct parallel_data, reorder_work);
1531 + padata_reorder(pd);
1532 ++ local_bh_enable();
1533 + }
1534 +
1535 + static void padata_serial_worker(struct work_struct *serial_work)
1536 +@@ -341,29 +322,22 @@ static void padata_serial_worker(struct work_struct *serial_work)
1537 + */
1538 + void padata_do_serial(struct padata_priv *padata)
1539 + {
1540 +- int cpu;
1541 +- struct padata_parallel_queue *pqueue;
1542 +- struct parallel_data *pd;
1543 +-
1544 +- pd = padata->pd;
1545 +-
1546 +- cpu = get_cpu();
1547 +- pqueue = per_cpu_ptr(pd->pqueue, cpu);
1548 ++ struct parallel_data *pd = padata->pd;
1549 ++ struct padata_parallel_queue *pqueue = per_cpu_ptr(pd->pqueue,
1550 ++ padata->cpu);
1551 +
1552 + spin_lock(&pqueue->reorder.lock);
1553 +- atomic_inc(&pd->reorder_objects);
1554 + list_add_tail(&padata->list, &pqueue->reorder.list);
1555 ++ atomic_inc(&pd->reorder_objects);
1556 + spin_unlock(&pqueue->reorder.lock);
1557 +
1558 + /*
1559 +- * Ensure the atomic_inc of reorder_objects above is ordered correctly
1560 ++ * Ensure the addition to the reorder list is ordered correctly
1561 + * with the trylock of pd->lock in padata_reorder. Pairs with smp_mb
1562 + * in padata_reorder.
1563 + */
1564 + smp_mb__after_atomic();
1565 +
1566 +- put_cpu();
1567 +-
1568 + padata_reorder(pd);
1569 + }
1570 + EXPORT_SYMBOL(padata_do_serial);
1571 +@@ -412,9 +386,14 @@ static void padata_init_pqueues(struct parallel_data *pd)
1572 + struct padata_parallel_queue *pqueue;
1573 +
1574 + cpu_index = 0;
1575 +- for_each_cpu(cpu, pd->cpumask.pcpu) {
1576 ++ for_each_possible_cpu(cpu) {
1577 + pqueue = per_cpu_ptr(pd->pqueue, cpu);
1578 +- pqueue->pd = pd;
1579 ++
1580 ++ if (!cpumask_test_cpu(cpu, pd->cpumask.pcpu)) {
1581 ++ pqueue->cpu_index = -1;
1582 ++ continue;
1583 ++ }
1584 ++
1585 + pqueue->cpu_index = cpu_index;
1586 + cpu_index++;
1587 +
1588 +@@ -448,12 +427,13 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
1589 +
1590 + padata_init_pqueues(pd);
1591 + padata_init_squeues(pd);
1592 +- setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
1593 + atomic_set(&pd->seq_nr, -1);
1594 + atomic_set(&pd->reorder_objects, 0);
1595 + atomic_set(&pd->refcnt, 1);
1596 + pd->pinst = pinst;
1597 + spin_lock_init(&pd->lock);
1598 ++ pd->cpu = cpumask_first(pd->cpumask.pcpu);
1599 ++ INIT_WORK(&pd->reorder_work, invoke_padata_reorder);
1600 +
1601 + return pd;
1602 +
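
The padata rework above replaces the processed-counter arithmetic with a pd->cpu cursor that is advanced round-robin over the pcpu cpumask via cpumask_next_wrap(), and swaps the reorder timer for a work item. A minimal userspace model of the cursor logic, with purely illustrative names (next_cpu_wrap below is a stand-in, not the kernel helper):

#include <stdio.h>

#define NCPUS 8

/* Advance to the next set bit after 'cur', wrapping to the lowest one;
 * models what pd->cpu = cpumask_next_wrap(cpu, ...) does above.
 */
static int next_cpu_wrap(unsigned int mask, int cur)
{
	int i, c;

	for (i = 1; i <= NCPUS; i++) {
		c = (cur + i) % NCPUS;
		if (mask & (1u << c))
			return c;
	}
	return -1; /* empty mask */
}

int main(void)
{
	unsigned int online = 0x2d; /* CPUs 0, 2, 3, 5 */
	int cpu = 0;                /* like pd->cpu = cpumask_first() */
	int obj;

	for (obj = 0; obj < 6; obj++) {
		printf("object %d dequeued from cpu %d\n", obj, cpu);
		cpu = next_cpu_wrap(online, cpu);
	}
	return 0;
}

Because padata_do_parallel() distributes objects over the CPUs round-robin, stepping the cursor the same way visits the reorder queues in the order the sequence-number arithmetic used to impose.
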
1603 +diff --git a/lib/cpumask.c b/lib/cpumask.c
1604 +index 5a70f6196f57..24f06e7abf92 100644
1605 +--- a/lib/cpumask.c
1606 ++++ b/lib/cpumask.c
1607 +@@ -42,6 +42,38 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
1608 + return i;
1609 + }
1610 +
1611 ++/**
1612 ++ * cpumask_next_wrap - helper to implement for_each_cpu_wrap
1613 ++ * @n: the cpu prior to the place to search
1614 ++ * @mask: the cpumask pointer
1615 ++ * @start: the start point of the iteration
1616 ++ * @wrap: assume @n crossing @start terminates the iteration
1617 ++ *
1618 ++ * Returns >= nr_cpu_ids on completion
1619 ++ *
1620 ++ * Note: the @wrap argument is required for the start condition when
1621 ++ * we cannot assume @start is set in @mask.
1622 ++ */
1623 ++int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
1624 ++{
1625 ++ int next;
1626 ++
1627 ++again:
1628 ++ next = cpumask_next(n, mask);
1629 ++
1630 ++ if (wrap && n < start && next >= start) {
1631 ++ return nr_cpumask_bits;
1632 ++
1633 ++ } else if (next >= nr_cpumask_bits) {
1634 ++ wrap = true;
1635 ++ n = -1;
1636 ++ goto again;
1637 ++ }
1638 ++
1639 ++ return next;
1640 ++}
1641 ++EXPORT_SYMBOL(cpumask_next_wrap);
1642 ++
1643 + /* These are not inline because of header tangles. */
1644 + #ifdef CONFIG_CPUMASK_OFFSTACK
1645 + /**
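
cpumask_next_wrap() is added here as the backend for a wrapping iterator; in later mainline kernels the companion macro is essentially the following (quoted from memory, treat as illustrative):

/* Walk every CPU in 'mask', starting the scan at 'start' and wrapping
 * past the end of the mask exactly once.
 */
#define for_each_cpu_wrap(cpu, mask, start)				   \
	for ((cpu) = cpumask_next_wrap((start)-1, (mask), (start), false); \
	     (cpu) < nr_cpumask_bits;					   \
	     (cpu) = cpumask_next_wrap((cpu), (mask), (start), true))

The wrap argument covers the case where @start itself is not set in @mask: the first call passes false so the scan is not cut short the first time it crosses @start, while subsequent calls pass true so the iteration terminates after one full wrap.
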
1646 +diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
1647 +index f33154365b64..389b6367a810 100644
1648 +--- a/net/ipv6/datagram.c
1649 ++++ b/net/ipv6/datagram.c
1650 +@@ -40,7 +40,8 @@ static bool ipv6_mapped_addr_any(const struct in6_addr *a)
1651 + return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0);
1652 + }
1653 +
1654 +-static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
1655 ++int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr,
1656 ++ int addr_len)
1657 + {
1658 + struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
1659 + struct inet_sock *inet = inet_sk(sk);
1660 +@@ -213,6 +214,7 @@ out:
1661 + fl6_sock_release(flowlabel);
1662 + return err;
1663 + }
1664 ++EXPORT_SYMBOL_GPL(__ip6_datagram_connect);
1665 +
1666 + int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
1667 + {
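
__ip6_datagram_connect() is exported above so a caller that already holds the socket lock can run the connect logic directly; the hunk's trailing context is cut off right at the locked wrapper, which for reference is simply:

int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	int res;

	lock_sock(sk);
	res = __ip6_datagram_connect(sk, uaddr, addr_len);
	release_sock(sk);
	return res;
}

The l2tp_ip6_connect() hunk later in this patch depends on this split: it must perform the SOCK_ZAPPED check and the connect inside a single lock_sock() section, which the locked wrapper cannot provide.
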
1668 +diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
1669 +index 8cbccddc0b1e..0233c496fc51 100644
1670 +--- a/net/l2tp/l2tp_core.c
1671 ++++ b/net/l2tp/l2tp_core.c
1672 +@@ -112,53 +112,19 @@ struct l2tp_net {
1673 + spinlock_t l2tp_session_hlist_lock;
1674 + };
1675 +
1676 +-static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
1677 +
1678 + static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk)
1679 + {
1680 + return sk->sk_user_data;
1681 + }
1682 +
1683 +-static inline struct l2tp_net *l2tp_pernet(struct net *net)
1684 ++static inline struct l2tp_net *l2tp_pernet(const struct net *net)
1685 + {
1686 + BUG_ON(!net);
1687 +
1688 + return net_generic(net, l2tp_net_id);
1689 + }
1690 +
1691 +-/* Tunnel reference counts. Incremented per session that is added to
1692 +- * the tunnel.
1693 +- */
1694 +-static inline void l2tp_tunnel_inc_refcount_1(struct l2tp_tunnel *tunnel)
1695 +-{
1696 +- atomic_inc(&tunnel->ref_count);
1697 +-}
1698 +-
1699 +-static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel)
1700 +-{
1701 +- if (atomic_dec_and_test(&tunnel->ref_count))
1702 +- l2tp_tunnel_free(tunnel);
1703 +-}
1704 +-#ifdef L2TP_REFCNT_DEBUG
1705 +-#define l2tp_tunnel_inc_refcount(_t) \
1706 +-do { \
1707 +- pr_debug("l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n", \
1708 +- __func__, __LINE__, (_t)->name, \
1709 +- atomic_read(&_t->ref_count)); \
1710 +- l2tp_tunnel_inc_refcount_1(_t); \
1711 +-} while (0)
1712 +-#define l2tp_tunnel_dec_refcount(_t) \
1713 +-do { \
1714 +- pr_debug("l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", \
1715 +- __func__, __LINE__, (_t)->name, \
1716 +- atomic_read(&_t->ref_count)); \
1717 +- l2tp_tunnel_dec_refcount_1(_t); \
1718 +-} while (0)
1719 +-#else
1720 +-#define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t)
1721 +-#define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t)
1722 +-#endif
1723 +-
1724 + /* Session hash global list for L2TPv3.
1725 + * The session_id SHOULD be random according to RFC3931, but several
1726 + * L2TP implementations use incrementing session_ids. So we do a real
1727 +@@ -216,27 +182,6 @@ static void l2tp_tunnel_sock_put(struct sock *sk)
1728 + sock_put(sk);
1729 + }
1730 +
1731 +-/* Lookup a session by id in the global session list
1732 +- */
1733 +-static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id)
1734 +-{
1735 +- struct l2tp_net *pn = l2tp_pernet(net);
1736 +- struct hlist_head *session_list =
1737 +- l2tp_session_id_hash_2(pn, session_id);
1738 +- struct l2tp_session *session;
1739 +-
1740 +- rcu_read_lock_bh();
1741 +- hlist_for_each_entry_rcu(session, session_list, global_hlist) {
1742 +- if (session->session_id == session_id) {
1743 +- rcu_read_unlock_bh();
1744 +- return session;
1745 +- }
1746 +- }
1747 +- rcu_read_unlock_bh();
1748 +-
1749 +- return NULL;
1750 +-}
1751 +-
1752 + /* Session hash list.
1753 + * The session_id SHOULD be random according to RFC2661, but several
1754 + * L2TP implementations (Cisco and Microsoft) use incrementing
1755 +@@ -249,38 +194,31 @@ l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id)
1756 + return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)];
1757 + }
1758 +
1759 +-/* Lookup a session by id
1760 +- */
1761 +-struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id)
1762 ++/* Lookup a tunnel. A new reference is held on the returned tunnel. */
1763 ++struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
1764 + {
1765 +- struct hlist_head *session_list;
1766 +- struct l2tp_session *session;
1767 ++ const struct l2tp_net *pn = l2tp_pernet(net);
1768 ++ struct l2tp_tunnel *tunnel;
1769 +
1770 +- /* In L2TPv3, session_ids are unique over all tunnels and we
1771 +- * sometimes need to look them up before we know the
1772 +- * tunnel.
1773 +- */
1774 +- if (tunnel == NULL)
1775 +- return l2tp_session_find_2(net, session_id);
1776 ++ rcu_read_lock_bh();
1777 ++ list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
1778 ++ if (tunnel->tunnel_id == tunnel_id) {
1779 ++ l2tp_tunnel_inc_refcount(tunnel);
1780 ++ rcu_read_unlock_bh();
1781 +
1782 +- session_list = l2tp_session_id_hash(tunnel, session_id);
1783 +- read_lock_bh(&tunnel->hlist_lock);
1784 +- hlist_for_each_entry(session, session_list, hlist) {
1785 +- if (session->session_id == session_id) {
1786 +- read_unlock_bh(&tunnel->hlist_lock);
1787 +- return session;
1788 ++ return tunnel;
1789 + }
1790 + }
1791 +- read_unlock_bh(&tunnel->hlist_lock);
1792 ++ rcu_read_unlock_bh();
1793 +
1794 + return NULL;
1795 + }
1796 +-EXPORT_SYMBOL_GPL(l2tp_session_find);
1797 ++EXPORT_SYMBOL_GPL(l2tp_tunnel_get);
1798 +
1799 +-/* Like l2tp_session_find() but takes a reference on the returned session.
1800 ++/* Lookup a session. A new reference is held on the returned session.
1801 + * Optionally calls session->ref() too if do_ref is true.
1802 + */
1803 +-struct l2tp_session *l2tp_session_get(struct net *net,
1804 ++struct l2tp_session *l2tp_session_get(const struct net *net,
1805 + struct l2tp_tunnel *tunnel,
1806 + u32 session_id, bool do_ref)
1807 + {
1808 +@@ -355,7 +293,9 @@ EXPORT_SYMBOL_GPL(l2tp_session_get_nth);
1809 + /* Lookup a session by interface name.
1810 + * This is very inefficient but is only used by management interfaces.
1811 + */
1812 +-struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
1813 ++struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
1814 ++ const char *ifname,
1815 ++ bool do_ref)
1816 + {
1817 + struct l2tp_net *pn = l2tp_pernet(net);
1818 + int hash;
1819 +@@ -365,7 +305,11 @@ struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
1820 + for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) {
1821 + hlist_for_each_entry_rcu(session, &pn->l2tp_session_hlist[hash], global_hlist) {
1822 + if (!strcmp(session->ifname, ifname)) {
1823 ++ l2tp_session_inc_refcount(session);
1824 ++ if (do_ref && session->ref)
1825 ++ session->ref(session);
1826 + rcu_read_unlock_bh();
1827 ++
1828 + return session;
1829 + }
1830 + }
1831 +@@ -375,22 +319,30 @@ struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
1832 +
1833 + return NULL;
1834 + }
1835 +-EXPORT_SYMBOL_GPL(l2tp_session_find_by_ifname);
1836 ++EXPORT_SYMBOL_GPL(l2tp_session_get_by_ifname);
1837 +
1838 +-static int l2tp_session_add_to_tunnel(struct l2tp_tunnel *tunnel,
1839 +- struct l2tp_session *session)
1840 ++int l2tp_session_register(struct l2tp_session *session,
1841 ++ struct l2tp_tunnel *tunnel)
1842 + {
1843 + struct l2tp_session *session_walk;
1844 + struct hlist_head *g_head;
1845 + struct hlist_head *head;
1846 + struct l2tp_net *pn;
1847 ++ int err;
1848 +
1849 + head = l2tp_session_id_hash(tunnel, session->session_id);
1850 +
1851 + write_lock_bh(&tunnel->hlist_lock);
1852 ++ if (!tunnel->acpt_newsess) {
1853 ++ err = -ENODEV;
1854 ++ goto err_tlock;
1855 ++ }
1856 ++
1857 + hlist_for_each_entry(session_walk, head, hlist)
1858 +- if (session_walk->session_id == session->session_id)
1859 +- goto exist;
1860 ++ if (session_walk->session_id == session->session_id) {
1861 ++ err = -EEXIST;
1862 ++ goto err_tlock;
1863 ++ }
1864 +
1865 + if (tunnel->version == L2TP_HDR_VER_3) {
1866 + pn = l2tp_pernet(tunnel->l2tp_net);
1867 +@@ -398,30 +350,44 @@ static int l2tp_session_add_to_tunnel(struct l2tp_tunnel *tunnel,
1868 + session->session_id);
1869 +
1870 + spin_lock_bh(&pn->l2tp_session_hlist_lock);
1871 ++
1872 + hlist_for_each_entry(session_walk, g_head, global_hlist)
1873 +- if (session_walk->session_id == session->session_id)
1874 +- goto exist_glob;
1875 ++ if (session_walk->session_id == session->session_id) {
1876 ++ err = -EEXIST;
1877 ++ goto err_tlock_pnlock;
1878 ++ }
1879 +
1880 ++ l2tp_tunnel_inc_refcount(tunnel);
1881 ++ sock_hold(tunnel->sock);
1882 + hlist_add_head_rcu(&session->global_hlist, g_head);
1883 ++
1884 + spin_unlock_bh(&pn->l2tp_session_hlist_lock);
1885 ++ } else {
1886 ++ l2tp_tunnel_inc_refcount(tunnel);
1887 ++ sock_hold(tunnel->sock);
1888 + }
1889 +
1890 + hlist_add_head(&session->hlist, head);
1891 + write_unlock_bh(&tunnel->hlist_lock);
1892 +
1893 ++ /* Ignore management session in session count value */
1894 ++ if (session->session_id != 0)
1895 ++ atomic_inc(&l2tp_session_count);
1896 ++
1897 + return 0;
1898 +
1899 +-exist_glob:
1900 ++err_tlock_pnlock:
1901 + spin_unlock_bh(&pn->l2tp_session_hlist_lock);
1902 +-exist:
1903 ++err_tlock:
1904 + write_unlock_bh(&tunnel->hlist_lock);
1905 +
1906 +- return -EEXIST;
1907 ++ return err;
1908 + }
1909 ++EXPORT_SYMBOL_GPL(l2tp_session_register);
1910 +
1911 + /* Lookup a tunnel by id
1912 + */
1913 +-struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id)
1914 ++struct l2tp_tunnel *l2tp_tunnel_find(const struct net *net, u32 tunnel_id)
1915 + {
1916 + struct l2tp_tunnel *tunnel;
1917 + struct l2tp_net *pn = l2tp_pernet(net);
1918 +@@ -439,7 +405,7 @@ struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id)
1919 + }
1920 + EXPORT_SYMBOL_GPL(l2tp_tunnel_find);
1921 +
1922 +-struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth)
1923 ++struct l2tp_tunnel *l2tp_tunnel_find_nth(const struct net *net, int nth)
1924 + {
1925 + struct l2tp_net *pn = l2tp_pernet(net);
1926 + struct l2tp_tunnel *tunnel;
1927 +@@ -1307,7 +1273,6 @@ static void l2tp_tunnel_destruct(struct sock *sk)
1928 + /* Remove hooks into tunnel socket */
1929 + sk->sk_destruct = tunnel->old_sk_destruct;
1930 + sk->sk_user_data = NULL;
1931 +- tunnel->sock = NULL;
1932 +
1933 + /* Remove the tunnel struct from the tunnel list */
1934 + pn = l2tp_pernet(tunnel->l2tp_net);
1935 +@@ -1317,6 +1282,8 @@ static void l2tp_tunnel_destruct(struct sock *sk)
1936 + atomic_dec(&l2tp_tunnel_count);
1937 +
1938 + l2tp_tunnel_closeall(tunnel);
1939 ++
1940 ++ tunnel->sock = NULL;
1941 + l2tp_tunnel_dec_refcount(tunnel);
1942 +
1943 + /* Call the original destructor */
1944 +@@ -1341,6 +1308,7 @@ void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
1945 + tunnel->name);
1946 +
1947 + write_lock_bh(&tunnel->hlist_lock);
1948 ++ tunnel->acpt_newsess = false;
1949 + for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
1950 + again:
1951 + hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
1952 +@@ -1394,17 +1362,6 @@ static void l2tp_udp_encap_destroy(struct sock *sk)
1953 + }
1954 + }
1955 +
1956 +-/* Really kill the tunnel.
1957 +- * Come here only when all sessions have been cleared from the tunnel.
1958 +- */
1959 +-static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
1960 +-{
1961 +- BUG_ON(atomic_read(&tunnel->ref_count) != 0);
1962 +- BUG_ON(tunnel->sock != NULL);
1963 +- l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: free...\n", tunnel->name);
1964 +- kfree_rcu(tunnel, rcu);
1965 +-}
1966 +-
1967 + /* Workqueue tunnel deletion function */
1968 + static void l2tp_tunnel_del_work(struct work_struct *work)
1969 + {
1970 +@@ -1655,6 +1612,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
1971 + tunnel->magic = L2TP_TUNNEL_MAGIC;
1972 + sprintf(&tunnel->name[0], "tunl %u", tunnel_id);
1973 + rwlock_init(&tunnel->hlist_lock);
1974 ++ tunnel->acpt_newsess = true;
1975 +
1976 + /* The net we belong to */
1977 + tunnel->l2tp_net = net;
1978 +@@ -1840,7 +1798,6 @@ EXPORT_SYMBOL_GPL(l2tp_session_set_header_len);
1979 + struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
1980 + {
1981 + struct l2tp_session *session;
1982 +- int err;
1983 +
1984 + session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL);
1985 + if (session != NULL) {
1986 +@@ -1896,25 +1853,7 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
1987 +
1988 + l2tp_session_set_header_len(session, tunnel->version);
1989 +
1990 +- err = l2tp_session_add_to_tunnel(tunnel, session);
1991 +- if (err) {
1992 +- kfree(session);
1993 +-
1994 +- return ERR_PTR(err);
1995 +- }
1996 +-
1997 +- /* Bump the reference count. The session context is deleted
1998 +- * only when this drops to zero.
1999 +- */
2000 + l2tp_session_inc_refcount(session);
2001 +- l2tp_tunnel_inc_refcount(tunnel);
2002 +-
2003 +- /* Ensure tunnel socket isn't deleted */
2004 +- sock_hold(tunnel->sock);
2005 +-
2006 +- /* Ignore management session in session count value */
2007 +- if (session->session_id != 0)
2008 +- atomic_inc(&l2tp_session_count);
2009 +
2010 + return session;
2011 + }
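
With l2tp_session_add_to_tunnel() folded into the new l2tp_session_register(), session setup becomes a two-step sequence: allocate, initialise private state, then register. A sketch of the resulting calling convention (kernel context assumed, error handling abbreviated):

	session = l2tp_session_create(sizeof(*priv), tunnel, session_id,
				      peer_session_id, cfg);
	if (IS_ERR(session))
		return PTR_ERR(session);

	/* ... set up l2tp_session_priv(session) before it is visible ... */

	err = l2tp_session_register(session, tunnel);
	if (err < 0) {
		kfree(session);	/* never registered, so no refs to drop */
		return err;
	}

Registering last means no other CPU can look the session up while it is half-initialised, and l2tp_session_register() can refuse new sessions once tunnel->acpt_newsess is cleared by l2tp_tunnel_closeall().
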
2012 +diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
2013 +index 06323a12d62c..57da0f1d62dd 100644
2014 +--- a/net/l2tp/l2tp_core.h
2015 ++++ b/net/l2tp/l2tp_core.h
2016 +@@ -23,16 +23,6 @@
2017 + #define L2TP_HASH_BITS_2 8
2018 + #define L2TP_HASH_SIZE_2 (1 << L2TP_HASH_BITS_2)
2019 +
2020 +-/* Debug message categories for the DEBUG socket option */
2021 +-enum {
2022 +- L2TP_MSG_DEBUG = (1 << 0), /* verbose debug (if
2023 +- * compiled in) */
2024 +- L2TP_MSG_CONTROL = (1 << 1), /* userspace - kernel
2025 +- * interface */
2026 +- L2TP_MSG_SEQ = (1 << 2), /* sequence numbers */
2027 +- L2TP_MSG_DATA = (1 << 3), /* data packets */
2028 +-};
2029 +-
2030 + struct sk_buff;
2031 +
2032 + struct l2tp_stats {
2033 +@@ -175,6 +165,10 @@ struct l2tp_tunnel {
2034 +
2035 + struct rcu_head rcu;
2036 + rwlock_t hlist_lock; /* protect session_hlist */
2037 ++ bool acpt_newsess; /* Indicates whether this
2038 ++ * tunnel accepts new sessions.
2039 ++ * Protected by hlist_lock.
2040 ++ */
2041 + struct hlist_head session_hlist[L2TP_HASH_SIZE];
2042 + /* hashed list of sessions,
2043 + * hashed by id */
2044 +@@ -210,7 +204,9 @@ struct l2tp_tunnel {
2045 + };
2046 +
2047 + struct l2tp_nl_cmd_ops {
2048 +- int (*session_create)(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg);
2049 ++ int (*session_create)(struct net *net, struct l2tp_tunnel *tunnel,
2050 ++ u32 session_id, u32 peer_session_id,
2051 ++ struct l2tp_session_cfg *cfg);
2052 + int (*session_delete)(struct l2tp_session *session);
2053 + };
2054 +
2055 +@@ -244,17 +240,18 @@ out:
2056 + return tunnel;
2057 + }
2058 +
2059 +-struct l2tp_session *l2tp_session_get(struct net *net,
2060 ++struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id);
2061 ++
2062 ++struct l2tp_session *l2tp_session_get(const struct net *net,
2063 + struct l2tp_tunnel *tunnel,
2064 + u32 session_id, bool do_ref);
2065 +-struct l2tp_session *l2tp_session_find(struct net *net,
2066 +- struct l2tp_tunnel *tunnel,
2067 +- u32 session_id);
2068 + struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
2069 + bool do_ref);
2070 +-struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname);
2071 +-struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id);
2072 +-struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth);
2073 ++struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
2074 ++ const char *ifname,
2075 ++ bool do_ref);
2076 ++struct l2tp_tunnel *l2tp_tunnel_find(const struct net *net, u32 tunnel_id);
2077 ++struct l2tp_tunnel *l2tp_tunnel_find_nth(const struct net *net, int nth);
2078 +
2079 + int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id,
2080 + u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg,
2081 +@@ -265,6 +262,9 @@ struct l2tp_session *l2tp_session_create(int priv_size,
2082 + struct l2tp_tunnel *tunnel,
2083 + u32 session_id, u32 peer_session_id,
2084 + struct l2tp_session_cfg *cfg);
2085 ++int l2tp_session_register(struct l2tp_session *session,
2086 ++ struct l2tp_tunnel *tunnel);
2087 ++
2088 + void __l2tp_session_unhash(struct l2tp_session *session);
2089 + int l2tp_session_delete(struct l2tp_session *session);
2090 + void l2tp_session_free(struct l2tp_session *session);
2091 +@@ -283,6 +283,17 @@ int l2tp_nl_register_ops(enum l2tp_pwtype pw_type,
2092 + void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type);
2093 + int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg);
2094 +
2095 ++static inline void l2tp_tunnel_inc_refcount(struct l2tp_tunnel *tunnel)
2096 ++{
2097 ++ atomic_inc(&tunnel->ref_count);
2098 ++}
2099 ++
2100 ++static inline void l2tp_tunnel_dec_refcount(struct l2tp_tunnel *tunnel)
2101 ++{
2102 ++ if (atomic_dec_and_test(&tunnel->ref_count))
2103 ++ kfree_rcu(tunnel, rcu);
2104 ++}
2105 ++
2106 + /* Session reference counts. Incremented when code obtains a reference
2107 + * to a session.
2108 + */
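
The tunnel refcount helpers are now plain inlines visible to every l2tp module, and the final put frees the tunnel through kfree_rcu() instead of a separate l2tp_tunnel_free(). The intended pairing, as used by the netlink hunks later in this patch (fragment, kernel context assumed):

	tunnel = l2tp_tunnel_get(net, tunnel_id);	/* takes a reference */
	if (!tunnel)
		return -ENODEV;

	/* ... tunnel cannot be freed while the reference is held ... */

	l2tp_tunnel_dec_refcount(tunnel);		/* may kfree_rcu() it */
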
2109 +diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
2110 +index c94160df71af..e0a65ee1e830 100644
2111 +--- a/net/l2tp/l2tp_eth.c
2112 ++++ b/net/l2tp/l2tp_eth.c
2113 +@@ -30,6 +30,9 @@
2114 + #include <net/xfrm.h>
2115 + #include <net/net_namespace.h>
2116 + #include <net/netns/generic.h>
2117 ++#include <linux/ip.h>
2118 ++#include <linux/ipv6.h>
2119 ++#include <linux/udp.h>
2120 +
2121 + #include "l2tp_core.h"
2122 +
2123 +@@ -41,7 +44,6 @@ struct l2tp_eth {
2124 + struct net_device *dev;
2125 + struct sock *tunnel_sock;
2126 + struct l2tp_session *session;
2127 +- struct list_head list;
2128 + atomic_long_t tx_bytes;
2129 + atomic_long_t tx_packets;
2130 + atomic_long_t tx_dropped;
2131 +@@ -52,20 +54,9 @@ struct l2tp_eth {
2132 +
2133 + /* via l2tp_session_priv() */
2134 + struct l2tp_eth_sess {
2135 +- struct net_device *dev;
2136 ++ struct net_device __rcu *dev;
2137 + };
2138 +
2139 +-/* per-net private data for this module */
2140 +-static unsigned int l2tp_eth_net_id;
2141 +-struct l2tp_eth_net {
2142 +- struct list_head l2tp_eth_dev_list;
2143 +- spinlock_t l2tp_eth_lock;
2144 +-};
2145 +-
2146 +-static inline struct l2tp_eth_net *l2tp_eth_pernet(struct net *net)
2147 +-{
2148 +- return net_generic(net, l2tp_eth_net_id);
2149 +-}
2150 +
2151 + static struct lock_class_key l2tp_eth_tx_busylock;
2152 + static int l2tp_eth_dev_init(struct net_device *dev)
2153 +@@ -82,12 +73,13 @@ static int l2tp_eth_dev_init(struct net_device *dev)
2154 + static void l2tp_eth_dev_uninit(struct net_device *dev)
2155 + {
2156 + struct l2tp_eth *priv = netdev_priv(dev);
2157 +- struct l2tp_eth_net *pn = l2tp_eth_pernet(dev_net(dev));
2158 ++ struct l2tp_eth_sess *spriv;
2159 +
2160 +- spin_lock(&pn->l2tp_eth_lock);
2161 +- list_del_init(&priv->list);
2162 +- spin_unlock(&pn->l2tp_eth_lock);
2163 +- dev_put(dev);
2164 ++ spriv = l2tp_session_priv(priv->session);
2165 ++ RCU_INIT_POINTER(spriv->dev, NULL);
2166 ++ /* No need for synchronize_net() here. We're called by
2167 ++ * unregister_netdev*(), which does the synchronisation for us.
2168 ++ */
2169 + }
2170 +
2171 + static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
2172 +@@ -141,8 +133,8 @@ static void l2tp_eth_dev_setup(struct net_device *dev)
2173 + static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len)
2174 + {
2175 + struct l2tp_eth_sess *spriv = l2tp_session_priv(session);
2176 +- struct net_device *dev = spriv->dev;
2177 +- struct l2tp_eth *priv = netdev_priv(dev);
2178 ++ struct net_device *dev;
2179 ++ struct l2tp_eth *priv;
2180 +
2181 + if (session->debug & L2TP_MSG_DATA) {
2182 + unsigned int length;
2183 +@@ -166,16 +158,25 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
2184 + skb_dst_drop(skb);
2185 + nf_reset(skb);
2186 +
2187 ++ rcu_read_lock();
2188 ++ dev = rcu_dereference(spriv->dev);
2189 ++ if (!dev)
2190 ++ goto error_rcu;
2191 ++
2192 ++ priv = netdev_priv(dev);
2193 + if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
2194 + atomic_long_inc(&priv->rx_packets);
2195 + atomic_long_add(data_len, &priv->rx_bytes);
2196 + } else {
2197 + atomic_long_inc(&priv->rx_errors);
2198 + }
2199 ++ rcu_read_unlock();
2200 ++
2201 + return;
2202 +
2203 ++error_rcu:
2204 ++ rcu_read_unlock();
2205 + error:
2206 +- atomic_long_inc(&priv->rx_errors);
2207 + kfree_skb(skb);
2208 + }
2209 +
2210 +@@ -186,11 +187,15 @@ static void l2tp_eth_delete(struct l2tp_session *session)
2211 +
2212 + if (session) {
2213 + spriv = l2tp_session_priv(session);
2214 +- dev = spriv->dev;
2215 ++
2216 ++ rtnl_lock();
2217 ++ dev = rtnl_dereference(spriv->dev);
2218 + if (dev) {
2219 +- unregister_netdev(dev);
2220 +- spriv->dev = NULL;
2221 ++ unregister_netdevice(dev);
2222 ++ rtnl_unlock();
2223 + module_put(THIS_MODULE);
2224 ++ } else {
2225 ++ rtnl_unlock();
2226 + }
2227 + }
2228 + }
2229 +@@ -200,35 +205,89 @@ static void l2tp_eth_show(struct seq_file *m, void *arg)
2230 + {
2231 + struct l2tp_session *session = arg;
2232 + struct l2tp_eth_sess *spriv = l2tp_session_priv(session);
2233 +- struct net_device *dev = spriv->dev;
2234 ++ struct net_device *dev;
2235 ++
2236 ++ rcu_read_lock();
2237 ++ dev = rcu_dereference(spriv->dev);
2238 ++ if (!dev) {
2239 ++ rcu_read_unlock();
2240 ++ return;
2241 ++ }
2242 ++ dev_hold(dev);
2243 ++ rcu_read_unlock();
2244 +
2245 + seq_printf(m, " interface %s\n", dev->name);
2246 ++
2247 ++ dev_put(dev);
2248 + }
2249 + #endif
2250 +
2251 +-static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
2252 ++static void l2tp_eth_adjust_mtu(struct l2tp_tunnel *tunnel,
2253 ++ struct l2tp_session *session,
2254 ++ struct net_device *dev)
2255 ++{
2256 ++ unsigned int overhead = 0;
2257 ++ struct dst_entry *dst;
2258 ++ u32 l3_overhead = 0;
2259 ++
2260 ++ /* if the encap is UDP, account for UDP header size */
2261 ++ if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
2262 ++ overhead += sizeof(struct udphdr);
2263 ++ dev->needed_headroom += sizeof(struct udphdr);
2264 ++ }
2265 ++ if (session->mtu != 0) {
2266 ++ dev->mtu = session->mtu;
2267 ++ dev->needed_headroom += session->hdr_len;
2268 ++ return;
2269 ++ }
2270 ++ lock_sock(tunnel->sock);
2271 ++ l3_overhead = kernel_sock_ip_overhead(tunnel->sock);
2272 ++ release_sock(tunnel->sock);
2273 ++ if (l3_overhead == 0) {
2274 ++ /* L3 overhead couldn't be identified; this could be
2275 ++ * because tunnel->sock was NULL or the socket's
2276 ++ * address family was not IPv4 or IPv6.
2277 ++ * The dev MTU stays at 1500.
2278 ++ */
2279 ++ return;
2280 ++ }
2281 ++ /* Adjust MTU, factoring in overhead: underlay L3 and overlay L2 hdr.
2282 ++ * UDP overhead, if any, was already factored in above.
2283 ++ */
2284 ++ overhead += session->hdr_len + ETH_HLEN + l3_overhead;
2285 ++
2286 ++ /* If PMTU discovery was enabled, use discovered MTU on L2TP device */
2287 ++ dst = sk_dst_get(tunnel->sock);
2288 ++ if (dst) {
2289 ++ /* dst_mtu will use PMTU if found, else fallback to intf MTU */
2290 ++ u32 pmtu = dst_mtu(dst);
2291 ++
2292 ++ if (pmtu != 0)
2293 ++ dev->mtu = pmtu;
2294 ++ dst_release(dst);
2295 ++ }
2296 ++ session->mtu = dev->mtu - overhead;
2297 ++ dev->mtu = session->mtu;
2298 ++ dev->needed_headroom += session->hdr_len;
2299 ++}
2300 ++
2301 ++static int l2tp_eth_create(struct net *net, struct l2tp_tunnel *tunnel,
2302 ++ u32 session_id, u32 peer_session_id,
2303 ++ struct l2tp_session_cfg *cfg)
2304 + {
2305 + struct net_device *dev;
2306 + char name[IFNAMSIZ];
2307 +- struct l2tp_tunnel *tunnel;
2308 + struct l2tp_session *session;
2309 + struct l2tp_eth *priv;
2310 + struct l2tp_eth_sess *spriv;
2311 + int rc;
2312 +- struct l2tp_eth_net *pn;
2313 +-
2314 +- tunnel = l2tp_tunnel_find(net, tunnel_id);
2315 +- if (!tunnel) {
2316 +- rc = -ENODEV;
2317 +- goto out;
2318 +- }
2319 +
2320 + if (cfg->ifname) {
2321 + dev = dev_get_by_name(net, cfg->ifname);
2322 + if (dev) {
2323 + dev_put(dev);
2324 + rc = -EEXIST;
2325 +- goto out;
2326 ++ goto err;
2327 + }
2328 + strlcpy(name, cfg->ifname, IFNAMSIZ);
2329 + } else
2330 +@@ -238,26 +297,22 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
2331 + peer_session_id, cfg);
2332 + if (IS_ERR(session)) {
2333 + rc = PTR_ERR(session);
2334 +- goto out;
2335 ++ goto err;
2336 + }
2337 +
2338 + dev = alloc_netdev(sizeof(*priv), name, NET_NAME_UNKNOWN,
2339 + l2tp_eth_dev_setup);
2340 + if (!dev) {
2341 + rc = -ENOMEM;
2342 +- goto out_del_session;
2343 ++ goto err_sess;
2344 + }
2345 +
2346 + dev_net_set(dev, net);
2347 +- if (session->mtu == 0)
2348 +- session->mtu = dev->mtu - session->hdr_len;
2349 +- dev->mtu = session->mtu;
2350 +- dev->needed_headroom += session->hdr_len;
2351 ++ l2tp_eth_adjust_mtu(tunnel, session, dev);
2352 +
2353 + priv = netdev_priv(dev);
2354 + priv->dev = dev;
2355 + priv->session = session;
2356 +- INIT_LIST_HEAD(&priv->list);
2357 +
2358 + priv->tunnel_sock = tunnel->sock;
2359 + session->recv_skb = l2tp_eth_dev_recv;
2360 +@@ -267,48 +322,50 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
2361 + #endif
2362 +
2363 + spriv = l2tp_session_priv(session);
2364 +- spriv->dev = dev;
2365 +
2366 +- rc = register_netdev(dev);
2367 +- if (rc < 0)
2368 +- goto out_del_dev;
2369 ++ l2tp_session_inc_refcount(session);
2370 +
2371 +- __module_get(THIS_MODULE);
2372 +- /* Must be done after register_netdev() */
2373 +- strlcpy(session->ifname, dev->name, IFNAMSIZ);
2374 ++ rtnl_lock();
2375 +
2376 +- dev_hold(dev);
2377 +- pn = l2tp_eth_pernet(dev_net(dev));
2378 +- spin_lock(&pn->l2tp_eth_lock);
2379 +- list_add(&priv->list, &pn->l2tp_eth_dev_list);
2380 +- spin_unlock(&pn->l2tp_eth_lock);
2381 ++ /* Register both device and session while holding the rtnl lock. This
2382 ++ * ensures that l2tp_eth_delete() will see that there's a device to
2383 ++ * unregister, even if it happened to run before we assign spriv->dev.
2384 ++ */
2385 ++ rc = l2tp_session_register(session, tunnel);
2386 ++ if (rc < 0) {
2387 ++ rtnl_unlock();
2388 ++ goto err_sess_dev;
2389 ++ }
2390 +
2391 +- return 0;
2392 ++ rc = register_netdevice(dev);
2393 ++ if (rc < 0) {
2394 ++ rtnl_unlock();
2395 ++ l2tp_session_delete(session);
2396 ++ l2tp_session_dec_refcount(session);
2397 ++ free_netdev(dev);
2398 +
2399 +-out_del_dev:
2400 +- free_netdev(dev);
2401 +- spriv->dev = NULL;
2402 +-out_del_session:
2403 +- l2tp_session_delete(session);
2404 +-out:
2405 +- return rc;
2406 +-}
2407 ++ return rc;
2408 ++ }
2409 +
2410 +-static __net_init int l2tp_eth_init_net(struct net *net)
2411 +-{
2412 +- struct l2tp_eth_net *pn = net_generic(net, l2tp_eth_net_id);
2413 ++ strlcpy(session->ifname, dev->name, IFNAMSIZ);
2414 ++ rcu_assign_pointer(spriv->dev, dev);
2415 ++
2416 ++ rtnl_unlock();
2417 +
2418 +- INIT_LIST_HEAD(&pn->l2tp_eth_dev_list);
2419 +- spin_lock_init(&pn->l2tp_eth_lock);
2420 ++ l2tp_session_dec_refcount(session);
2421 ++
2422 ++ __module_get(THIS_MODULE);
2423 +
2424 + return 0;
2425 +-}
2426 +
2427 +-static struct pernet_operations l2tp_eth_net_ops = {
2428 +- .init = l2tp_eth_init_net,
2429 +- .id = &l2tp_eth_net_id,
2430 +- .size = sizeof(struct l2tp_eth_net),
2431 +-};
2432 ++err_sess_dev:
2433 ++ l2tp_session_dec_refcount(session);
2434 ++ free_netdev(dev);
2435 ++err_sess:
2436 ++ kfree(session);
2437 ++err:
2438 ++ return rc;
2439 ++}
2440 +
2441 +
2442 + static const struct l2tp_nl_cmd_ops l2tp_eth_nl_cmd_ops = {
2443 +@@ -323,25 +380,18 @@ static int __init l2tp_eth_init(void)
2444 +
2445 + err = l2tp_nl_register_ops(L2TP_PWTYPE_ETH, &l2tp_eth_nl_cmd_ops);
2446 + if (err)
2447 +- goto out;
2448 +-
2449 +- err = register_pernet_device(&l2tp_eth_net_ops);
2450 +- if (err)
2451 +- goto out_unreg;
2452 ++ goto err;
2453 +
2454 + pr_info("L2TP ethernet pseudowire support (L2TPv3)\n");
2455 +
2456 + return 0;
2457 +
2458 +-out_unreg:
2459 +- l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH);
2460 +-out:
2461 ++err:
2462 + return err;
2463 + }
2464 +
2465 + static void __exit l2tp_eth_exit(void)
2466 + {
2467 +- unregister_pernet_device(&l2tp_eth_net_ops);
2468 + l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH);
2469 + }
2470 +
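
As a sanity check on the arithmetic in l2tp_eth_adjust_mtu(), here is a standalone model with assumed example values: an IPv4 underlay, UDP encapsulation, a 1500-byte path MTU and an illustrative 6-byte session header (the real header length depends on the session configuration):

#include <stdio.h>

int main(void)
{
	unsigned int pmtu        = 1500; /* dst_mtu() on the tunnel socket */
	unsigned int udp_hdr     = 8;	 /* sizeof(struct udphdr) */
	unsigned int l3_overhead = 20;	 /* IPv4; kernel_sock_ip_overhead() */
	unsigned int l2tp_hdr    = 6;	 /* session->hdr_len, assumed */
	unsigned int eth_hlen    = 14;	 /* ETH_HLEN */

	unsigned int overhead = udp_hdr + l2tp_hdr + eth_hlen + l3_overhead;

	/* matches session->mtu = dev->mtu - overhead in the hunk above */
	printf("session->mtu = %u\n", pmtu - overhead); /* prints 1452 */
	return 0;
}
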
2471 +diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
2472 +index 58f87bdd12c7..fd7363f8405a 100644
2473 +--- a/net/l2tp/l2tp_ip.c
2474 ++++ b/net/l2tp/l2tp_ip.c
2475 +@@ -122,6 +122,7 @@ static int l2tp_ip_recv(struct sk_buff *skb)
2476 + unsigned char *ptr, *optr;
2477 + struct l2tp_session *session;
2478 + struct l2tp_tunnel *tunnel = NULL;
2479 ++ struct iphdr *iph;
2480 + int length;
2481 +
2482 + if (!pskb_may_pull(skb, 4))
2483 +@@ -180,23 +181,16 @@ pass_up:
2484 + goto discard;
2485 +
2486 + tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
2487 +- tunnel = l2tp_tunnel_find(net, tunnel_id);
2488 +- if (tunnel) {
2489 +- sk = tunnel->sock;
2490 +- sock_hold(sk);
2491 +- } else {
2492 +- struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
2493 +-
2494 +- read_lock_bh(&l2tp_ip_lock);
2495 +- sk = __l2tp_ip_bind_lookup(net, iph->daddr, 0, tunnel_id);
2496 +- if (!sk) {
2497 +- read_unlock_bh(&l2tp_ip_lock);
2498 +- goto discard;
2499 +- }
2500 ++ iph = (struct iphdr *)skb_network_header(skb);
2501 +
2502 +- sock_hold(sk);
2503 ++ read_lock_bh(&l2tp_ip_lock);
2504 ++ sk = __l2tp_ip_bind_lookup(net, iph->daddr, 0, tunnel_id);
2505 ++ if (!sk) {
2506 + read_unlock_bh(&l2tp_ip_lock);
2507 ++ goto discard;
2508 + }
2509 ++ sock_hold(sk);
2510 ++ read_unlock_bh(&l2tp_ip_lock);
2511 +
2512 + if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
2513 + goto discard_put;
2514 +@@ -269,15 +263,9 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
2515 + if (addr->l2tp_family != AF_INET)
2516 + return -EINVAL;
2517 +
2518 +- ret = -EADDRINUSE;
2519 +- read_lock_bh(&l2tp_ip_lock);
2520 +- if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr,
2521 +- sk->sk_bound_dev_if, addr->l2tp_conn_id))
2522 +- goto out_in_use;
2523 +-
2524 +- read_unlock_bh(&l2tp_ip_lock);
2525 +-
2526 + lock_sock(sk);
2527 ++
2528 ++ ret = -EINVAL;
2529 + if (!sock_flag(sk, SOCK_ZAPPED))
2530 + goto out;
2531 +
2532 +@@ -294,25 +282,28 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
2533 + inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr;
2534 + if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
2535 + inet->inet_saddr = 0; /* Use device */
2536 +- sk_dst_reset(sk);
2537 +
2538 ++ write_lock_bh(&l2tp_ip_lock);
2539 ++ if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr,
2540 ++ sk->sk_bound_dev_if, addr->l2tp_conn_id)) {
2541 ++ write_unlock_bh(&l2tp_ip_lock);
2542 ++ ret = -EADDRINUSE;
2543 ++ goto out;
2544 ++ }
2545 ++
2546 ++ sk_dst_reset(sk);
2547 + l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id;
2548 +
2549 +- write_lock_bh(&l2tp_ip_lock);
2550 + sk_add_bind_node(sk, &l2tp_ip_bind_table);
2551 + sk_del_node_init(sk);
2552 + write_unlock_bh(&l2tp_ip_lock);
2553 ++
2554 + ret = 0;
2555 + sock_reset_flag(sk, SOCK_ZAPPED);
2556 +
2557 + out:
2558 + release_sock(sk);
2559 +
2560 +- return ret;
2561 +-
2562 +-out_in_use:
2563 +- read_unlock_bh(&l2tp_ip_lock);
2564 +-
2565 + return ret;
2566 + }
2567 +
2568 +@@ -321,21 +312,24 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
2569 + struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr;
2570 + int rc;
2571 +
2572 +- if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
2573 +- return -EINVAL;
2574 +-
2575 + if (addr_len < sizeof(*lsa))
2576 + return -EINVAL;
2577 +
2578 + if (ipv4_is_multicast(lsa->l2tp_addr.s_addr))
2579 + return -EINVAL;
2580 +
2581 +- rc = ip4_datagram_connect(sk, uaddr, addr_len);
2582 +- if (rc < 0)
2583 +- return rc;
2584 +-
2585 + lock_sock(sk);
2586 +
2587 ++ /* Must bind first - autobinding does not work */
2588 ++ if (sock_flag(sk, SOCK_ZAPPED)) {
2589 ++ rc = -EINVAL;
2590 ++ goto out_sk;
2591 ++ }
2592 ++
2593 ++ rc = __ip4_datagram_connect(sk, uaddr, addr_len);
2594 ++ if (rc < 0)
2595 ++ goto out_sk;
2596 ++
2597 + l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;
2598 +
2599 + write_lock_bh(&l2tp_ip_lock);
2600 +@@ -343,7 +337,9 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
2601 + sk_add_bind_node(sk, &l2tp_ip_bind_table);
2602 + write_unlock_bh(&l2tp_ip_lock);
2603 +
2604 ++out_sk:
2605 + release_sock(sk);
2606 ++
2607 + return rc;
2608 + }
2609 +
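
The l2tp_ip bind change closes a check-then-act race: the old code tested for a conflicting bind under a read lock, dropped it, and inserted the socket much later, so two sockets could both pass the test. The fixed shape, schematically (pseudocode-level, not exact kernel API):

	write_lock_bh(&bind_lock);
	if (bind_lookup(table, addr, dev_if, conn_id)) {
		write_unlock_bh(&bind_lock);
		return -EADDRINUSE;	/* another bind won the race */
	}
	bind_insert(table, sk);		/* check and insert are atomic now */
	write_unlock_bh(&bind_lock);

Holding the same write lock across both the lookup and the insertion is what makes the -EADDRINUSE guarantee reliable.
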
2610 +diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
2611 +index 2b5230ef8536..5bb5337e74fc 100644
2612 +--- a/net/l2tp/l2tp_ip6.c
2613 ++++ b/net/l2tp/l2tp_ip6.c
2614 +@@ -134,6 +134,7 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
2615 + unsigned char *ptr, *optr;
2616 + struct l2tp_session *session;
2617 + struct l2tp_tunnel *tunnel = NULL;
2618 ++ struct ipv6hdr *iph;
2619 + int length;
2620 +
2621 + if (!pskb_may_pull(skb, 4))
2622 +@@ -193,24 +194,16 @@ pass_up:
2623 + goto discard;
2624 +
2625 + tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
2626 +- tunnel = l2tp_tunnel_find(net, tunnel_id);
2627 +- if (tunnel) {
2628 +- sk = tunnel->sock;
2629 +- sock_hold(sk);
2630 +- } else {
2631 +- struct ipv6hdr *iph = ipv6_hdr(skb);
2632 +-
2633 +- read_lock_bh(&l2tp_ip6_lock);
2634 +- sk = __l2tp_ip6_bind_lookup(net, &iph->daddr,
2635 +- 0, tunnel_id);
2636 +- if (!sk) {
2637 +- read_unlock_bh(&l2tp_ip6_lock);
2638 +- goto discard;
2639 +- }
2640 ++ iph = ipv6_hdr(skb);
2641 +
2642 +- sock_hold(sk);
2643 ++ read_lock_bh(&l2tp_ip6_lock);
2644 ++ sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, 0, tunnel_id);
2645 ++ if (!sk) {
2646 + read_unlock_bh(&l2tp_ip6_lock);
2647 ++ goto discard;
2648 + }
2649 ++ sock_hold(sk);
2650 ++ read_unlock_bh(&l2tp_ip6_lock);
2651 +
2652 + if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
2653 + goto discard_put;
2654 +@@ -278,6 +271,7 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
2655 + struct sockaddr_l2tpip6 *addr = (struct sockaddr_l2tpip6 *) uaddr;
2656 + struct net *net = sock_net(sk);
2657 + __be32 v4addr = 0;
2658 ++ int bound_dev_if;
2659 + int addr_type;
2660 + int err;
2661 +
2662 +@@ -296,13 +290,6 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
2663 + if (addr_type & IPV6_ADDR_MULTICAST)
2664 + return -EADDRNOTAVAIL;
2665 +
2666 +- err = -EADDRINUSE;
2667 +- read_lock_bh(&l2tp_ip6_lock);
2668 +- if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr,
2669 +- sk->sk_bound_dev_if, addr->l2tp_conn_id))
2670 +- goto out_in_use;
2671 +- read_unlock_bh(&l2tp_ip6_lock);
2672 +-
2673 + lock_sock(sk);
2674 +
2675 + err = -EINVAL;
2676 +@@ -312,28 +299,25 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
2677 + if (sk->sk_state != TCP_CLOSE)
2678 + goto out_unlock;
2679 +
2680 ++ bound_dev_if = sk->sk_bound_dev_if;
2681 ++
2682 + /* Check if the address belongs to the host. */
2683 + rcu_read_lock();
2684 + if (addr_type != IPV6_ADDR_ANY) {
2685 + struct net_device *dev = NULL;
2686 +
2687 + if (addr_type & IPV6_ADDR_LINKLOCAL) {
2688 +- if (addr_len >= sizeof(struct sockaddr_in6) &&
2689 +- addr->l2tp_scope_id) {
2690 +- /* Override any existing binding, if another
2691 +- * one is supplied by user.
2692 +- */
2693 +- sk->sk_bound_dev_if = addr->l2tp_scope_id;
2694 +- }
2695 ++ if (addr->l2tp_scope_id)
2696 ++ bound_dev_if = addr->l2tp_scope_id;
2697 +
2698 + /* Binding to link-local address requires an
2699 +- interface */
2700 +- if (!sk->sk_bound_dev_if)
2701 ++ * interface.
2702 ++ */
2703 ++ if (!bound_dev_if)
2704 + goto out_unlock_rcu;
2705 +
2706 + err = -ENODEV;
2707 +- dev = dev_get_by_index_rcu(sock_net(sk),
2708 +- sk->sk_bound_dev_if);
2709 ++ dev = dev_get_by_index_rcu(sock_net(sk), bound_dev_if);
2710 + if (!dev)
2711 + goto out_unlock_rcu;
2712 + }
2713 +@@ -348,13 +332,22 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
2714 + }
2715 + rcu_read_unlock();
2716 +
2717 +- inet->inet_rcv_saddr = inet->inet_saddr = v4addr;
2718 ++ write_lock_bh(&l2tp_ip6_lock);
2719 ++ if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr, bound_dev_if,
2720 ++ addr->l2tp_conn_id)) {
2721 ++ write_unlock_bh(&l2tp_ip6_lock);
2722 ++ err = -EADDRINUSE;
2723 ++ goto out_unlock;
2724 ++ }
2725 ++
2726 ++ inet->inet_saddr = v4addr;
2727 ++ inet->inet_rcv_saddr = v4addr;
2728 ++ sk->sk_bound_dev_if = bound_dev_if;
2729 + sk->sk_v6_rcv_saddr = addr->l2tp_addr;
2730 + np->saddr = addr->l2tp_addr;
2731 +
2732 + l2tp_ip6_sk(sk)->conn_id = addr->l2tp_conn_id;
2733 +
2734 +- write_lock_bh(&l2tp_ip6_lock);
2735 + sk_add_bind_node(sk, &l2tp_ip6_bind_table);
2736 + sk_del_node_init(sk);
2737 + write_unlock_bh(&l2tp_ip6_lock);
2738 +@@ -367,10 +360,7 @@ out_unlock_rcu:
2739 + rcu_read_unlock();
2740 + out_unlock:
2741 + release_sock(sk);
2742 +- return err;
2743 +
2744 +-out_in_use:
2745 +- read_unlock_bh(&l2tp_ip6_lock);
2746 + return err;
2747 + }
2748 +
2749 +@@ -383,9 +373,6 @@ static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr,
2750 + int addr_type;
2751 + int rc;
2752 +
2753 +- if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
2754 +- return -EINVAL;
2755 +-
2756 + if (addr_len < sizeof(*lsa))
2757 + return -EINVAL;
2758 +
2759 +@@ -402,10 +389,18 @@ static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr,
2760 + return -EINVAL;
2761 + }
2762 +
2763 +- rc = ip6_datagram_connect(sk, uaddr, addr_len);
2764 +-
2765 + lock_sock(sk);
2766 +
2767 ++ /* Must bind first - autobinding does not work */
2768 ++ if (sock_flag(sk, SOCK_ZAPPED)) {
2769 ++ rc = -EINVAL;
2770 ++ goto out_sk;
2771 ++ }
2772 ++
2773 ++ rc = __ip6_datagram_connect(sk, uaddr, addr_len);
2774 ++ if (rc < 0)
2775 ++ goto out_sk;
2776 ++
2777 + l2tp_ip6_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;
2778 +
2779 + write_lock_bh(&l2tp_ip6_lock);
2780 +@@ -413,6 +408,7 @@ static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr,
2781 + sk_add_bind_node(sk, &l2tp_ip6_bind_table);
2782 + write_unlock_bh(&l2tp_ip6_lock);
2783 +
2784 ++out_sk:
2785 + release_sock(sk);
2786 +
2787 + return rc;
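
The IPv6 variant applies the same write-lock fix and additionally stages the device binding in a local variable, so the socket is only modified once every check has passed. In outline, consolidating the l2tp_ip6_bind() hunks above:

	int bound_dev_if = sk->sk_bound_dev_if;

	if (addr->l2tp_scope_id)
		bound_dev_if = addr->l2tp_scope_id;	/* staged only */

	write_lock_bh(&l2tp_ip6_lock);
	if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr, bound_dev_if,
				   addr->l2tp_conn_id)) {
		write_unlock_bh(&l2tp_ip6_lock);
		err = -EADDRINUSE;	/* sk left untouched on failure */
		goto out_unlock;
	}
	sk->sk_bound_dev_if = bound_dev_if;		/* commit */
	sk_add_bind_node(sk, &l2tp_ip6_bind_table);
	write_unlock_bh(&l2tp_ip6_lock);
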
2788 +diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
2789 +index fb3248ff8b48..d3a84a181348 100644
2790 +--- a/net/l2tp/l2tp_netlink.c
2791 ++++ b/net/l2tp/l2tp_netlink.c
2792 +@@ -55,7 +55,8 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq,
2793 + /* Accessed under genl lock */
2794 + static const struct l2tp_nl_cmd_ops *l2tp_nl_cmd_ops[__L2TP_PWTYPE_MAX];
2795 +
2796 +-static struct l2tp_session *l2tp_nl_session_find(struct genl_info *info)
2797 ++static struct l2tp_session *l2tp_nl_session_get(struct genl_info *info,
2798 ++ bool do_ref)
2799 + {
2800 + u32 tunnel_id;
2801 + u32 session_id;
2802 +@@ -66,14 +67,17 @@ static struct l2tp_session *l2tp_nl_session_find(struct genl_info *info)
2803 +
2804 + if (info->attrs[L2TP_ATTR_IFNAME]) {
2805 + ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]);
2806 +- session = l2tp_session_find_by_ifname(net, ifname);
2807 ++ session = l2tp_session_get_by_ifname(net, ifname, do_ref);
2808 + } else if ((info->attrs[L2TP_ATTR_SESSION_ID]) &&
2809 + (info->attrs[L2TP_ATTR_CONN_ID])) {
2810 + tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
2811 + session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]);
2812 +- tunnel = l2tp_tunnel_find(net, tunnel_id);
2813 +- if (tunnel)
2814 +- session = l2tp_session_find(net, tunnel, session_id);
2815 ++ tunnel = l2tp_tunnel_get(net, tunnel_id);
2816 ++ if (tunnel) {
2817 ++ session = l2tp_session_get(net, tunnel, session_id,
2818 ++ do_ref);
2819 ++ l2tp_tunnel_dec_refcount(tunnel);
2820 ++ }
2821 + }
2822 +
2823 + return session;
2824 +@@ -276,8 +280,8 @@ static int l2tp_nl_cmd_tunnel_delete(struct sk_buff *skb, struct genl_info *info
2825 + }
2826 + tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
2827 +
2828 +- tunnel = l2tp_tunnel_find(net, tunnel_id);
2829 +- if (tunnel == NULL) {
2830 ++ tunnel = l2tp_tunnel_get(net, tunnel_id);
2831 ++ if (!tunnel) {
2832 + ret = -ENODEV;
2833 + goto out;
2834 + }
2835 +@@ -287,6 +291,8 @@ static int l2tp_nl_cmd_tunnel_delete(struct sk_buff *skb, struct genl_info *info
2836 +
2837 + l2tp_tunnel_delete(tunnel);
2838 +
2839 ++ l2tp_tunnel_dec_refcount(tunnel);
2840 ++
2841 + out:
2842 + return ret;
2843 + }
2844 +@@ -304,8 +310,8 @@ static int l2tp_nl_cmd_tunnel_modify(struct sk_buff *skb, struct genl_info *info
2845 + }
2846 + tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
2847 +
2848 +- tunnel = l2tp_tunnel_find(net, tunnel_id);
2849 +- if (tunnel == NULL) {
2850 ++ tunnel = l2tp_tunnel_get(net, tunnel_id);
2851 ++ if (!tunnel) {
2852 + ret = -ENODEV;
2853 + goto out;
2854 + }
2855 +@@ -316,6 +322,8 @@ static int l2tp_nl_cmd_tunnel_modify(struct sk_buff *skb, struct genl_info *info
2856 + ret = l2tp_tunnel_notify(&l2tp_nl_family, info,
2857 + tunnel, L2TP_CMD_TUNNEL_MODIFY);
2858 +
2859 ++ l2tp_tunnel_dec_refcount(tunnel);
2860 ++
2861 + out:
2862 + return ret;
2863 + }
2864 +@@ -420,34 +428,37 @@ static int l2tp_nl_cmd_tunnel_get(struct sk_buff *skb, struct genl_info *info)
2865 +
2866 + if (!info->attrs[L2TP_ATTR_CONN_ID]) {
2867 + ret = -EINVAL;
2868 +- goto out;
2869 ++ goto err;
2870 + }
2871 +
2872 + tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
2873 +
2874 +- tunnel = l2tp_tunnel_find(net, tunnel_id);
2875 +- if (tunnel == NULL) {
2876 +- ret = -ENODEV;
2877 +- goto out;
2878 +- }
2879 +-
2880 + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2881 + if (!msg) {
2882 + ret = -ENOMEM;
2883 +- goto out;
2884 ++ goto err;
2885 ++ }
2886 ++
2887 ++ tunnel = l2tp_tunnel_get(net, tunnel_id);
2888 ++ if (!tunnel) {
2889 ++ ret = -ENODEV;
2890 ++ goto err_nlmsg;
2891 + }
2892 +
2893 + ret = l2tp_nl_tunnel_send(msg, info->snd_portid, info->snd_seq,
2894 + NLM_F_ACK, tunnel, L2TP_CMD_TUNNEL_GET);
2895 + if (ret < 0)
2896 +- goto err_out;
2897 ++ goto err_nlmsg_tunnel;
2898 ++
2899 ++ l2tp_tunnel_dec_refcount(tunnel);
2900 +
2901 + return genlmsg_unicast(net, msg, info->snd_portid);
2902 +
2903 +-err_out:
2904 ++err_nlmsg_tunnel:
2905 ++ l2tp_tunnel_dec_refcount(tunnel);
2906 ++err_nlmsg:
2907 + nlmsg_free(msg);
2908 +-
2909 +-out:
2910 ++err:
2911 + return ret;
2912 + }
2913 +
2914 +@@ -491,8 +502,9 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
2915 + ret = -EINVAL;
2916 + goto out;
2917 + }
2918 ++
2919 + tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
2920 +- tunnel = l2tp_tunnel_find(net, tunnel_id);
2921 ++ tunnel = l2tp_tunnel_get(net, tunnel_id);
2922 + if (!tunnel) {
2923 + ret = -ENODEV;
2924 + goto out;
2925 +@@ -500,29 +512,24 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
2926 +
2927 + if (!info->attrs[L2TP_ATTR_SESSION_ID]) {
2928 + ret = -EINVAL;
2929 +- goto out;
2930 ++ goto out_tunnel;
2931 + }
2932 + session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]);
2933 +- session = l2tp_session_find(net, tunnel, session_id);
2934 +- if (session) {
2935 +- ret = -EEXIST;
2936 +- goto out;
2937 +- }
2938 +
2939 + if (!info->attrs[L2TP_ATTR_PEER_SESSION_ID]) {
2940 + ret = -EINVAL;
2941 +- goto out;
2942 ++ goto out_tunnel;
2943 + }
2944 + peer_session_id = nla_get_u32(info->attrs[L2TP_ATTR_PEER_SESSION_ID]);
2945 +
2946 + if (!info->attrs[L2TP_ATTR_PW_TYPE]) {
2947 + ret = -EINVAL;
2948 +- goto out;
2949 ++ goto out_tunnel;
2950 + }
2951 + cfg.pw_type = nla_get_u16(info->attrs[L2TP_ATTR_PW_TYPE]);
2952 + if (cfg.pw_type >= __L2TP_PWTYPE_MAX) {
2953 + ret = -EINVAL;
2954 +- goto out;
2955 ++ goto out_tunnel;
2956 + }
2957 +
2958 + if (tunnel->version > 2) {
2959 +@@ -544,7 +551,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
2960 + u16 len = nla_len(info->attrs[L2TP_ATTR_COOKIE]);
2961 + if (len > 8) {
2962 + ret = -EINVAL;
2963 +- goto out;
2964 ++ goto out_tunnel;
2965 + }
2966 + cfg.cookie_len = len;
2967 + memcpy(&cfg.cookie[0], nla_data(info->attrs[L2TP_ATTR_COOKIE]), len);
2968 +@@ -553,7 +560,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
2969 + u16 len = nla_len(info->attrs[L2TP_ATTR_PEER_COOKIE]);
2970 + if (len > 8) {
2971 + ret = -EINVAL;
2972 +- goto out;
2973 ++ goto out_tunnel;
2974 + }
2975 + cfg.peer_cookie_len = len;
2976 + memcpy(&cfg.peer_cookie[0], nla_data(info->attrs[L2TP_ATTR_PEER_COOKIE]), len);
2977 +@@ -596,7 +603,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
2978 + if ((l2tp_nl_cmd_ops[cfg.pw_type] == NULL) ||
2979 + (l2tp_nl_cmd_ops[cfg.pw_type]->session_create == NULL)) {
2980 + ret = -EPROTONOSUPPORT;
2981 +- goto out;
2982 ++ goto out_tunnel;
2983 + }
2984 +
2985 + /* Check that pseudowire-specific params are present */
2986 +@@ -606,7 +613,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
2987 + case L2TP_PWTYPE_ETH_VLAN:
2988 + if (!info->attrs[L2TP_ATTR_VLAN_ID]) {
2989 + ret = -EINVAL;
2990 +- goto out;
2991 ++ goto out_tunnel;
2992 + }
2993 + break;
2994 + case L2TP_PWTYPE_ETH:
2995 +@@ -620,18 +627,22 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
2996 + break;
2997 + }
2998 +
2999 +- ret = -EPROTONOSUPPORT;
3000 +- if (l2tp_nl_cmd_ops[cfg.pw_type]->session_create)
3001 +- ret = (*l2tp_nl_cmd_ops[cfg.pw_type]->session_create)(net, tunnel_id,
3002 +- session_id, peer_session_id, &cfg);
3003 ++ ret = l2tp_nl_cmd_ops[cfg.pw_type]->session_create(net, tunnel,
3004 ++ session_id,
3005 ++ peer_session_id,
3006 ++ &cfg);
3007 +
3008 + if (ret >= 0) {
3009 +- session = l2tp_session_find(net, tunnel, session_id);
3010 +- if (session)
3011 ++ session = l2tp_session_get(net, tunnel, session_id, false);
3012 ++ if (session) {
3013 + ret = l2tp_session_notify(&l2tp_nl_family, info, session,
3014 + L2TP_CMD_SESSION_CREATE);
3015 ++ l2tp_session_dec_refcount(session);
3016 ++ }
3017 + }
3018 +
3019 ++out_tunnel:
3020 ++ l2tp_tunnel_dec_refcount(tunnel);
3021 + out:
3022 + return ret;
3023 + }
3024 +@@ -642,7 +653,7 @@ static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *inf
3025 + struct l2tp_session *session;
3026 + u16 pw_type;
3027 +
3028 +- session = l2tp_nl_session_find(info);
3029 ++ session = l2tp_nl_session_get(info, true);
3030 + if (session == NULL) {
3031 + ret = -ENODEV;
3032 + goto out;
3033 +@@ -656,6 +667,10 @@ static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *inf
3034 + if (l2tp_nl_cmd_ops[pw_type] && l2tp_nl_cmd_ops[pw_type]->session_delete)
3035 + ret = (*l2tp_nl_cmd_ops[pw_type]->session_delete)(session);
3036 +
3037 ++ if (session->deref)
3038 ++ session->deref(session);
3039 ++ l2tp_session_dec_refcount(session);
3040 ++
3041 + out:
3042 + return ret;
3043 + }
3044 +@@ -665,7 +680,7 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf
3045 + int ret = 0;
3046 + struct l2tp_session *session;
3047 +
3048 +- session = l2tp_nl_session_find(info);
3049 ++ session = l2tp_nl_session_get(info, false);
3050 + if (session == NULL) {
3051 + ret = -ENODEV;
3052 + goto out;
3053 +@@ -700,6 +715,8 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf
3054 + ret = l2tp_session_notify(&l2tp_nl_family, info,
3055 + session, L2TP_CMD_SESSION_MODIFY);
3056 +
3057 ++ l2tp_session_dec_refcount(session);
3058 ++
3059 + out:
3060 + return ret;
3061 + }
3062 +@@ -786,29 +803,34 @@ static int l2tp_nl_cmd_session_get(struct sk_buff *skb, struct genl_info *info)
3063 + struct sk_buff *msg;
3064 + int ret;
3065 +
3066 +- session = l2tp_nl_session_find(info);
3067 ++ session = l2tp_nl_session_get(info, false);
3068 + if (session == NULL) {
3069 + ret = -ENODEV;
3070 +- goto out;
3071 ++ goto err;
3072 + }
3073 +
3074 + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
3075 + if (!msg) {
3076 + ret = -ENOMEM;
3077 +- goto out;
3078 ++ goto err_ref;
3079 + }
3080 +
3081 + ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq,
3082 + 0, session, L2TP_CMD_SESSION_GET);
3083 + if (ret < 0)
3084 +- goto err_out;
3085 ++ goto err_ref_msg;
3086 +
3087 +- return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
3088 ++ ret = genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
3089 +
3090 +-err_out:
3091 +- nlmsg_free(msg);
3092 ++ l2tp_session_dec_refcount(session);
3093 +
3094 +-out:
3095 ++ return ret;
3096 ++
3097 ++err_ref_msg:
3098 ++ nlmsg_free(msg);
3099 ++err_ref:
3100 ++ l2tp_session_dec_refcount(session);
3101 ++err:
3102 + return ret;
3103 + }
3104 +
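
The netlink conversion from *_find() to *_get() means every successful lookup now returns a counted reference that must be released on every exit path. The l2tp_nl_cmd_session_delete() hunk above follows this skeleton (abbreviated):

	session = l2tp_nl_session_get(info, true);
	if (!session)
		return -ENODEV;

	/* ... act on the session ... */

	if (session->deref)
		session->deref(session);	/* balances session->ref() */
	l2tp_session_dec_refcount(session);	/* balances the get */

Callers that pass do_ref=false (modify, get) skip the deref and only drop the refcount.
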
3105 +diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
3106 +index bc5d6b8f8ede..8ff5352bb0e3 100644
3107 +--- a/net/l2tp/l2tp_ppp.c
3108 ++++ b/net/l2tp/l2tp_ppp.c
3109 +@@ -122,8 +122,11 @@
3110 + struct pppol2tp_session {
3111 + int owner; /* pid that opened the socket */
3112 +
3113 +- struct sock *sock; /* Pointer to the session
3114 ++ struct mutex sk_lock; /* Protects .sk */
3115 ++ struct sock __rcu *sk; /* Pointer to the session
3116 + * PPPoX socket */
3117 ++ struct sock *__sk; /* Copy of .sk, for cleanup */
3118 ++ struct rcu_head rcu; /* For asynchronous release */
3119 + struct sock *tunnel_sock; /* Pointer to the tunnel UDP
3120 + * socket */
3121 + int flags; /* accessed by PPPIOCGFLAGS.
3122 +@@ -138,6 +141,24 @@ static const struct ppp_channel_ops pppol2tp_chan_ops = {
3123 +
3124 + static const struct proto_ops pppol2tp_ops;
3125 +
3126 ++/* Retrieves the pppol2tp socket associated with a session.
3127 ++ * A reference is held on the returned socket, so this function must be paired
3128 ++ * with sock_put().
3129 ++ */
3130 ++static struct sock *pppol2tp_session_get_sock(struct l2tp_session *session)
3131 ++{
3132 ++ struct pppol2tp_session *ps = l2tp_session_priv(session);
3133 ++ struct sock *sk;
3134 ++
3135 ++ rcu_read_lock();
3136 ++ sk = rcu_dereference(ps->sk);
3137 ++ if (sk)
3138 ++ sock_hold(sk);
3139 ++ rcu_read_unlock();
3140 ++
3141 ++ return sk;
3142 ++}
3143 ++
3144 + /* Helpers to obtain tunnel/session contexts from sockets.
3145 + */
3146 + static inline struct l2tp_session *pppol2tp_sock_to_session(struct sock *sk)
3147 +@@ -224,13 +245,14 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int
3148 + /* If the socket is bound, send it in to PPP's input queue. Otherwise
3149 + * queue it on the session socket.
3150 + */
3151 +- sk = ps->sock;
3152 ++ rcu_read_lock();
3153 ++ sk = rcu_dereference(ps->sk);
3154 + if (sk == NULL)
3155 + goto no_sock;
3156 +
3157 + if (sk->sk_state & PPPOX_BOUND) {
3158 + struct pppox_sock *po;
3159 +- l2tp_dbg(session, PPPOL2TP_MSG_DATA,
3160 ++ l2tp_dbg(session, L2TP_MSG_DATA,
3161 + "%s: recv %d byte data frame, passing to ppp\n",
3162 + session->name, data_len);
3163 +
3164 +@@ -253,7 +275,7 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int
3165 + po = pppox_sk(sk);
3166 + ppp_input(&po->chan, skb);
3167 + } else {
3168 +- l2tp_dbg(session, PPPOL2TP_MSG_DATA,
3169 ++ l2tp_dbg(session, L2TP_MSG_DATA,
3170 + "%s: recv %d byte data frame, passing to L2TP socket\n",
3171 + session->name, data_len);
3172 +
3173 +@@ -262,30 +284,16 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int
3174 + kfree_skb(skb);
3175 + }
3176 + }
3177 ++ rcu_read_unlock();
3178 +
3179 + return;
3180 +
3181 + no_sock:
3182 +- l2tp_info(session, PPPOL2TP_MSG_DATA, "%s: no socket\n", session->name);
3183 ++ rcu_read_unlock();
3184 ++ l2tp_info(session, L2TP_MSG_DATA, "%s: no socket\n", session->name);
3185 + kfree_skb(skb);
3186 + }
3187 +
3188 +-static void pppol2tp_session_sock_hold(struct l2tp_session *session)
3189 +-{
3190 +- struct pppol2tp_session *ps = l2tp_session_priv(session);
3191 +-
3192 +- if (ps->sock)
3193 +- sock_hold(ps->sock);
3194 +-}
3195 +-
3196 +-static void pppol2tp_session_sock_put(struct l2tp_session *session)
3197 +-{
3198 +- struct pppol2tp_session *ps = l2tp_session_priv(session);
3199 +-
3200 +- if (ps->sock)
3201 +- sock_put(ps->sock);
3202 +-}
3203 +-
3204 + /************************************************************************
3205 + * Transmit handling
3206 + ***********************************************************************/
3207 +@@ -446,17 +454,16 @@ abort:
3208 + */
3209 + static void pppol2tp_session_close(struct l2tp_session *session)
3210 + {
3211 +- struct pppol2tp_session *ps = l2tp_session_priv(session);
3212 +- struct sock *sk = ps->sock;
3213 +- struct socket *sock = sk->sk_socket;
3214 ++ struct sock *sk;
3215 +
3216 + BUG_ON(session->magic != L2TP_SESSION_MAGIC);
3217 +
3218 +- if (sock)
3219 +- inet_shutdown(sock, SEND_SHUTDOWN);
3220 +-
3221 +- /* Don't let the session go away before our socket does */
3222 +- l2tp_session_inc_refcount(session);
3223 ++ sk = pppol2tp_session_get_sock(session);
3224 ++ if (sk) {
3225 ++ if (sk->sk_socket)
3226 ++ inet_shutdown(sk->sk_socket, SEND_SHUTDOWN);
3227 ++ sock_put(sk);
3228 ++ }
3229 + }
3230 +
3231 + /* Really kill the session socket. (Called from sock_put() if
3232 +@@ -476,6 +483,14 @@ static void pppol2tp_session_destruct(struct sock *sk)
3233 + }
3234 + }
3235 +
3236 ++static void pppol2tp_put_sk(struct rcu_head *head)
3237 ++{
3238 ++ struct pppol2tp_session *ps;
3239 ++
3240 ++ ps = container_of(head, typeof(*ps), rcu);
3241 ++ sock_put(ps->__sk);
3242 ++}
3243 ++
3244 + /* Called when the PPPoX socket (session) is closed.
3245 + */
3246 + static int pppol2tp_release(struct socket *sock)
3247 +@@ -501,11 +516,23 @@ static int pppol2tp_release(struct socket *sock)
3248 +
3249 + session = pppol2tp_sock_to_session(sk);
3250 +
3251 +- /* Purge any queued data */
3252 + if (session != NULL) {
3253 +- __l2tp_session_unhash(session);
3254 +- l2tp_session_queue_purge(session);
3255 +- sock_put(sk);
3256 ++ struct pppol2tp_session *ps;
3257 ++
3258 ++ l2tp_session_delete(session);
3259 ++
3260 ++ ps = l2tp_session_priv(session);
3261 ++ mutex_lock(&ps->sk_lock);
3262 ++ ps->__sk = rcu_dereference_protected(ps->sk,
3263 ++ lockdep_is_held(&ps->sk_lock));
3264 ++ RCU_INIT_POINTER(ps->sk, NULL);
3265 ++ mutex_unlock(&ps->sk_lock);
3266 ++ call_rcu(&ps->rcu, pppol2tp_put_sk);
3267 ++
3268 ++ /* Rely on the sock_put() call at the end of the function for
3269 ++ * dropping the reference held by pppol2tp_sock_to_session().
3270 ++ * The last reference will be dropped by pppol2tp_put_sk().
3271 ++ */
3272 + }
3273 + release_sock(sk);
3274 +
3275 +@@ -572,16 +599,47 @@ out:
3276 + static void pppol2tp_show(struct seq_file *m, void *arg)
3277 + {
3278 + struct l2tp_session *session = arg;
3279 +- struct pppol2tp_session *ps = l2tp_session_priv(session);
3280 ++ struct sock *sk;
3281 ++
3282 ++ sk = pppol2tp_session_get_sock(session);
3283 ++ if (sk) {
3284 ++ struct pppox_sock *po = pppox_sk(sk);
3285 +
3286 +- if (ps) {
3287 +- struct pppox_sock *po = pppox_sk(ps->sock);
3288 +- if (po)
3289 +- seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan));
3290 ++ seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan));
3291 ++ sock_put(sk);
3292 + }
3293 + }
3294 + #endif
3295 +
3296 ++static void pppol2tp_session_init(struct l2tp_session *session)
3297 ++{
3298 ++ struct pppol2tp_session *ps;
3299 ++ struct dst_entry *dst;
3300 ++
3301 ++ session->recv_skb = pppol2tp_recv;
3302 ++ session->session_close = pppol2tp_session_close;
3303 ++#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
3304 ++ session->show = pppol2tp_show;
3305 ++#endif
3306 ++
3307 ++ ps = l2tp_session_priv(session);
3308 ++ mutex_init(&ps->sk_lock);
3309 ++ ps->tunnel_sock = session->tunnel->sock;
3310 ++ ps->owner = current->pid;
3311 ++
3312 ++ /* If PMTU discovery was enabled, use the MTU that was discovered */
3313 ++ dst = sk_dst_get(session->tunnel->sock);
3314 ++ if (dst) {
3315 ++ u32 pmtu = dst_mtu(dst);
3316 ++
3317 ++ if (pmtu) {
3318 ++ session->mtu = pmtu - PPPOL2TP_HEADER_OVERHEAD;
3319 ++ session->mru = pmtu - PPPOL2TP_HEADER_OVERHEAD;
3320 ++ }
3321 ++ dst_release(dst);
3322 ++ }
3323 ++}
3324 ++
3325 + /* connect() handler. Attach a PPPoX socket to a tunnel UDP socket
3326 + */
3327 + static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
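pppol2tp_session_init() also centralizes the PMTU-derived sizing that pppol2tp_connect() previously did inline: if the tunnel socket's cached route reports a path MTU, both session->mtu and session->mru become that value minus the PPPoL2TP encapsulation overhead, and a zero PMTU leaves the defaults alone. A trivial sketch of the arithmetic; the 40-byte constant is only a stand-in for the driver's PPPOL2TP_HEADER_OVERHEAD.

#include <stdio.h>

#define HEADER_OVERHEAD 40  /* stand-in for PPPOL2TP_HEADER_OVERHEAD */

static void derive_mtu(unsigned int pmtu, unsigned int *mtu, unsigned int *mru)
{
        if (pmtu) {          /* 0 means no PMTU known: keep defaults */
                *mtu = pmtu - HEADER_OVERHEAD;
                *mru = pmtu - HEADER_OVERHEAD;
        }
}

int main(void)
{
        unsigned int mtu = 1400, mru = 1400;  /* illustrative defaults */

        derive_mtu(1500, &mtu, &mru);
        printf("mtu=%u mru=%u\n", mtu, mru);  /* 1460 1460 */
        return 0;
}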
3328 +@@ -593,7 +651,6 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
3329 + struct l2tp_session *session = NULL;
3330 + struct l2tp_tunnel *tunnel;
3331 + struct pppol2tp_session *ps;
3332 +- struct dst_entry *dst;
3333 + struct l2tp_session_cfg cfg = { 0, };
3334 + int error = 0;
3335 + u32 tunnel_id, peer_tunnel_id;
3336 +@@ -715,13 +772,17 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
3337 + /* Using a pre-existing session is fine as long as it hasn't
3338 + * been connected yet.
3339 + */
3340 +- if (ps->sock) {
3341 ++ mutex_lock(&ps->sk_lock);
3342 ++ if (rcu_dereference_protected(ps->sk,
3343 ++ lockdep_is_held(&ps->sk_lock))) {
3344 ++ mutex_unlock(&ps->sk_lock);
3345 + error = -EEXIST;
3346 + goto end;
3347 + }
3348 +
3349 + /* consistency checks */
3350 + if (ps->tunnel_sock != tunnel->sock) {
3351 ++ mutex_unlock(&ps->sk_lock);
3352 + error = -EEXIST;
3353 + goto end;
3354 + }
3355 +@@ -737,35 +798,19 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
3356 + error = PTR_ERR(session);
3357 + goto end;
3358 + }
3359 +- }
3360 +-
3361 +- /* Associate session with its PPPoL2TP socket */
3362 +- ps = l2tp_session_priv(session);
3363 +- ps->owner = current->pid;
3364 +- ps->sock = sk;
3365 +- ps->tunnel_sock = tunnel->sock;
3366 +
3367 +- session->recv_skb = pppol2tp_recv;
3368 +- session->session_close = pppol2tp_session_close;
3369 +-#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
3370 +- session->show = pppol2tp_show;
3371 +-#endif
3372 +-
3373 +- /* We need to know each time a skb is dropped from the reorder
3374 +- * queue.
3375 +- */
3376 +- session->ref = pppol2tp_session_sock_hold;
3377 +- session->deref = pppol2tp_session_sock_put;
3378 +-
3379 +- /* If PMTU discovery was enabled, use the MTU that was discovered */
3380 +- dst = sk_dst_get(tunnel->sock);
3381 +- if (dst != NULL) {
3382 +- u32 pmtu = dst_mtu(dst);
3383 ++ pppol2tp_session_init(session);
3384 ++ ps = l2tp_session_priv(session);
3385 ++ l2tp_session_inc_refcount(session);
3386 +
3387 +- if (pmtu != 0)
3388 +- session->mtu = session->mru = pmtu -
3389 +- PPPOL2TP_HEADER_OVERHEAD;
3390 +- dst_release(dst);
3391 ++ mutex_lock(&ps->sk_lock);
3392 ++ error = l2tp_session_register(session, tunnel);
3393 ++ if (error < 0) {
3394 ++ mutex_unlock(&ps->sk_lock);
3395 ++ kfree(session);
3396 ++ goto end;
3397 ++ }
3398 ++ drop_refcnt = true;
3399 + }
3400 +
3401 + /* Special case: if source & dest session_id == 0x0000, this
3402 +@@ -790,14 +835,25 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
3403 + po->chan.mtu = session->mtu;
3404 +
3405 + error = ppp_register_net_channel(sock_net(sk), &po->chan);
3406 +- if (error)
3407 ++ if (error) {
3408 ++ mutex_unlock(&ps->sk_lock);
3409 + goto end;
3410 ++ }
3411 +
3412 + out_no_ppp:
3413 + /* This is how we get the session context from the socket. */
3414 + sk->sk_user_data = session;
3415 ++ rcu_assign_pointer(ps->sk, sk);
3416 ++ mutex_unlock(&ps->sk_lock);
3417 ++
3418 ++ /* Keep the reference we've grabbed on the session: sk doesn't expect
3419 ++ * the session to disappear. pppol2tp_session_destruct() is responsible
3420 ++ * for dropping it.
3421 ++ */
3422 ++ drop_refcnt = false;
3423 ++
3424 + sk->sk_state = PPPOX_CONNECTED;
3425 +- l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: created\n",
3426 ++ l2tp_info(session, L2TP_MSG_CONTROL, "%s: created\n",
3427 + session->name);
3428 +
3429 + end:
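Note the locking discipline in the connect path: for a new session, ps->sk_lock is taken before l2tp_session_register() and held until after rcu_assign_pointer(ps->sk, sk), so a connect() racing on the now-visible session either observes the published socket and fails with -EEXIST or blocks on the mutex until publication completes. A compact model of that ordering, with a pthread mutex standing in for the kernel mutex and simplified stand-in helpers:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct ps {
        pthread_mutex_t sk_lock;
        void *_Atomic sk;  /* published PPPoX socket pointer */
};

/* stand-in for the "register then publish" sequence in pppol2tp_connect() */
static int connect_session(struct ps *ps, void *sk)
{
        int err = 0;

        pthread_mutex_lock(&ps->sk_lock);
        if (atomic_load(&ps->sk)) {     /* someone published first */
                err = -17;              /* -EEXIST */
                goto out;
        }
        /* ... l2tp_session_register(), ppp channel setup ... */
        atomic_store(&ps->sk, sk);      /* rcu_assign_pointer(ps->sk, sk) */
out:
        pthread_mutex_unlock(&ps->sk_lock);
        return err;
}

int main(void)
{
        struct ps ps = { PTHREAD_MUTEX_INITIALIZER };
        int a, b;

        a = connect_session(&ps, (void *)0x1);
        b = connect_session(&ps, (void *)0x2); /* loses the race */
        printf("first=%d second=%d\n", a, b);  /* 0 -17 */
        return 0;
}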
3430 +@@ -810,25 +866,19 @@ end:
3431 +
3432 + #ifdef CONFIG_L2TP_V3
3433 +
3434 +-/* Called when creating sessions via the netlink interface.
3435 +- */
3436 +-static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
3437 ++/* Called when creating sessions via the netlink interface. */
3438 ++static int pppol2tp_session_create(struct net *net, struct l2tp_tunnel *tunnel,
3439 ++ u32 session_id, u32 peer_session_id,
3440 ++ struct l2tp_session_cfg *cfg)
3441 + {
3442 + int error;
3443 +- struct l2tp_tunnel *tunnel;
3444 + struct l2tp_session *session;
3445 +- struct pppol2tp_session *ps;
3446 +-
3447 +- tunnel = l2tp_tunnel_find(net, tunnel_id);
3448 +-
3449 +- /* Error if we can't find the tunnel */
3450 +- error = -ENOENT;
3451 +- if (tunnel == NULL)
3452 +- goto out;
3453 +
3454 + /* Error if tunnel socket is not prepped */
3455 +- if (tunnel->sock == NULL)
3456 +- goto out;
3457 ++ if (!tunnel->sock) {
3458 ++ error = -ENOENT;
3459 ++ goto err;
3460 ++ }
3461 +
3462 + /* Default MTU values. */
3463 + if (cfg->mtu == 0)
3464 +@@ -842,18 +892,20 @@ static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_i
3465 + peer_session_id, cfg);
3466 + if (IS_ERR(session)) {
3467 + error = PTR_ERR(session);
3468 +- goto out;
3469 ++ goto err;
3470 + }
3471 +
3472 +- ps = l2tp_session_priv(session);
3473 +- ps->tunnel_sock = tunnel->sock;
3474 ++ pppol2tp_session_init(session);
3475 +
3476 +- l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: created\n",
3477 +- session->name);
3478 ++ error = l2tp_session_register(session, tunnel);
3479 ++ if (error < 0)
3480 ++ goto err_sess;
3481 +
3482 +- error = 0;
3483 ++ return 0;
3484 +
3485 +-out:
3486 ++err_sess:
3487 ++ kfree(session);
3488 ++err:
3489 + return error;
3490 + }
3491 +
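pppol2tp_session_create() now receives the tunnel from its caller instead of re-looking it up by ID, and it follows the same allocate/init/register split as the connect path: the session is fully initialized while still private, and a registration failure can unwind with a plain kfree() because nothing else can hold a reference yet. A standalone sketch of that shape, with illustrative helpers in place of the l2tp ones:

#include <stdio.h>
#include <stdlib.h>

struct session { int id; int registered; };

static struct session *session_alloc(int id)   /* like l2tp_session_create() */
{
        struct session *s = calloc(1, sizeof(*s));

        if (s)
                s->id = id;
        return s;
}

static int session_register(struct session *s) /* like l2tp_session_register() */
{
        if (s->id == 0)
                return -1;  /* e.g. duplicate ID in the tunnel's table */
        s->registered = 1;
        return 0;
}

static int create(int id)
{
        struct session *s = session_alloc(id);
        int error;

        if (!s)
                return -1;

        /* init private state here, while nothing else can see the session */

        error = session_register(s);
        if (error < 0)
                goto err_sess;  /* still unreachable: plain free is safe */

        return 0;

err_sess:
        free(s);
        return error;
}

int main(void)
{
        printf("create(5)=%d create(0)=%d\n", create(5), create(0));
        return 0;
}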
3492 +@@ -1010,16 +1062,14 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
3493 + struct l2tp_tunnel *tunnel = session->tunnel;
3494 + struct pppol2tp_ioc_stats stats;
3495 +
3496 +- l2tp_dbg(session, PPPOL2TP_MSG_CONTROL,
3497 ++ l2tp_dbg(session, L2TP_MSG_CONTROL,
3498 + "%s: pppol2tp_session_ioctl(cmd=%#x, arg=%#lx)\n",
3499 + session->name, cmd, arg);
3500 +
3501 +- sk = ps->sock;
3502 ++ sk = pppol2tp_session_get_sock(session);
3503 + if (!sk)
3504 + return -EBADR;
3505 +
3506 +- sock_hold(sk);
3507 +-
3508 + switch (cmd) {
3509 + case SIOCGIFMTU:
3510 + err = -ENXIO;
3511 +@@ -1033,7 +1083,7 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
3512 + if (copy_to_user((void __user *) arg, &ifr, sizeof(struct ifreq)))
3513 + break;
3514 +
3515 +- l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get mtu=%d\n",
3516 ++ l2tp_info(session, L2TP_MSG_CONTROL, "%s: get mtu=%d\n",
3517 + session->name, session->mtu);
3518 + err = 0;
3519 + break;
3520 +@@ -1049,7 +1099,7 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
3521 +
3522 + session->mtu = ifr.ifr_mtu;
3523 +
3524 +- l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set mtu=%d\n",
3525 ++ l2tp_info(session, L2TP_MSG_CONTROL, "%s: set mtu=%d\n",
3526 + session->name, session->mtu);
3527 + err = 0;
3528 + break;
3529 +@@ -1063,7 +1113,7 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
3530 + if (put_user(session->mru, (int __user *) arg))
3531 + break;
3532 +
3533 +- l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get mru=%d\n",
3534 ++ l2tp_info(session, L2TP_MSG_CONTROL, "%s: get mru=%d\n",
3535 + session->name, session->mru);
3536 + err = 0;
3537 + break;
3538 +@@ -1078,7 +1128,7 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
3539 + break;
3540 +
3541 + session->mru = val;
3542 +- l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set mru=%d\n",
3543 ++ l2tp_info(session, L2TP_MSG_CONTROL, "%s: set mru=%d\n",
3544 + session->name, session->mru);
3545 + err = 0;
3546 + break;
3547 +@@ -1088,7 +1138,7 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
3548 + if (put_user(ps->flags, (int __user *) arg))
3549 + break;
3550 +
3551 +- l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get flags=%d\n",
3552 ++ l2tp_info(session, L2TP_MSG_CONTROL, "%s: get flags=%d\n",
3553 + session->name, ps->flags);
3554 + err = 0;
3555 + break;
3556 +@@ -1098,7 +1148,7 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
3557 + if (get_user(val, (int __user *) arg))
3558 + break;
3559 + ps->flags = val;
3560 +- l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set flags=%d\n",
3561 ++ l2tp_info(session, L2TP_MSG_CONTROL, "%s: set flags=%d\n",
3562 + session->name, ps->flags);
3563 + err = 0;
3564 + break;
3565 +@@ -1115,7 +1165,7 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
3566 + if (copy_to_user((void __user *) arg, &stats,
3567 + sizeof(stats)))
3568 + break;
3569 +- l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get L2TP stats\n",
3570 ++ l2tp_info(session, L2TP_MSG_CONTROL, "%s: get L2TP stats\n",
3571 + session->name);
3572 + err = 0;
3573 + break;
3574 +@@ -1143,7 +1193,7 @@ static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel,
3575 + struct sock *sk;
3576 + struct pppol2tp_ioc_stats stats;
3577 +
3578 +- l2tp_dbg(tunnel, PPPOL2TP_MSG_CONTROL,
3579 ++ l2tp_dbg(tunnel, L2TP_MSG_CONTROL,
3580 + "%s: pppol2tp_tunnel_ioctl(cmd=%#x, arg=%#lx)\n",
3581 + tunnel->name, cmd, arg);
3582 +
3583 +@@ -1186,7 +1236,7 @@ static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel,
3584 + err = -EFAULT;
3585 + break;
3586 + }
3587 +- l2tp_info(tunnel, PPPOL2TP_MSG_CONTROL, "%s: get L2TP stats\n",
3588 ++ l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: get L2TP stats\n",
3589 + tunnel->name);
3590 + err = 0;
3591 + break;
3592 +@@ -1276,7 +1326,7 @@ static int pppol2tp_tunnel_setsockopt(struct sock *sk,
3593 + switch (optname) {
3594 + case PPPOL2TP_SO_DEBUG:
3595 + tunnel->debug = val;
3596 +- l2tp_info(tunnel, PPPOL2TP_MSG_CONTROL, "%s: set debug=%x\n",
3597 ++ l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: set debug=%x\n",
3598 + tunnel->name, tunnel->debug);
3599 + break;
3600 +
3601 +@@ -1295,7 +1345,6 @@ static int pppol2tp_session_setsockopt(struct sock *sk,
3602 + int optname, int val)
3603 + {
3604 + int err = 0;
3605 +- struct pppol2tp_session *ps = l2tp_session_priv(session);
3606 +
3607 + switch (optname) {
3608 + case PPPOL2TP_SO_RECVSEQ:
3609 +@@ -1304,7 +1353,7 @@ static int pppol2tp_session_setsockopt(struct sock *sk,
3610 + break;
3611 + }
3612 + session->recv_seq = val ? -1 : 0;
3613 +- l2tp_info(session, PPPOL2TP_MSG_CONTROL,
3614 ++ l2tp_info(session, L2TP_MSG_CONTROL,
3615 + "%s: set recv_seq=%d\n",
3616 + session->name, session->recv_seq);
3617 + break;
3618 +@@ -1316,13 +1365,13 @@ static int pppol2tp_session_setsockopt(struct sock *sk,
3619 + }
3620 + session->send_seq = val ? -1 : 0;
3621 + {
3622 +- struct sock *ssk = ps->sock;
3623 +- struct pppox_sock *po = pppox_sk(ssk);
3624 ++ struct pppox_sock *po = pppox_sk(sk);
3625 ++
3626 + po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ :
3627 + PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
3628 + }
3629 + l2tp_session_set_header_len(session, session->tunnel->version);
3630 +- l2tp_info(session, PPPOL2TP_MSG_CONTROL,
3631 ++ l2tp_info(session, L2TP_MSG_CONTROL,
3632 + "%s: set send_seq=%d\n",
3633 + session->name, session->send_seq);
3634 + break;
3635 +@@ -1333,20 +1382,20 @@ static int pppol2tp_session_setsockopt(struct sock *sk,
3636 + break;
3637 + }
3638 + session->lns_mode = val ? -1 : 0;
3639 +- l2tp_info(session, PPPOL2TP_MSG_CONTROL,
3640 ++ l2tp_info(session, L2TP_MSG_CONTROL,
3641 + "%s: set lns_mode=%d\n",
3642 + session->name, session->lns_mode);
3643 + break;
3644 +
3645 + case PPPOL2TP_SO_DEBUG:
3646 + session->debug = val;
3647 +- l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set debug=%x\n",
3648 ++ l2tp_info(session, L2TP_MSG_CONTROL, "%s: set debug=%x\n",
3649 + session->name, session->debug);
3650 + break;
3651 +
3652 + case PPPOL2TP_SO_REORDERTO:
3653 + session->reorder_timeout = msecs_to_jiffies(val);
3654 +- l2tp_info(session, PPPOL2TP_MSG_CONTROL,
3655 ++ l2tp_info(session, L2TP_MSG_CONTROL,
3656 + "%s: set reorder_timeout=%d\n",
3657 + session->name, session->reorder_timeout);
3658 + break;
3659 +@@ -1427,7 +1476,7 @@ static int pppol2tp_tunnel_getsockopt(struct sock *sk,
3660 + switch (optname) {
3661 + case PPPOL2TP_SO_DEBUG:
3662 + *val = tunnel->debug;
3663 +- l2tp_info(tunnel, PPPOL2TP_MSG_CONTROL, "%s: get debug=%x\n",
3664 ++ l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: get debug=%x\n",
3665 + tunnel->name, tunnel->debug);
3666 + break;
3667 +
3668 +@@ -1450,31 +1499,31 @@ static int pppol2tp_session_getsockopt(struct sock *sk,
3669 + switch (optname) {
3670 + case PPPOL2TP_SO_RECVSEQ:
3671 + *val = session->recv_seq;
3672 +- l2tp_info(session, PPPOL2TP_MSG_CONTROL,
3673 ++ l2tp_info(session, L2TP_MSG_CONTROL,
3674 + "%s: get recv_seq=%d\n", session->name, *val);
3675 + break;
3676 +
3677 + case PPPOL2TP_SO_SENDSEQ:
3678 + *val = session->send_seq;
3679 +- l2tp_info(session, PPPOL2TP_MSG_CONTROL,
3680 ++ l2tp_info(session, L2TP_MSG_CONTROL,
3681 + "%s: get send_seq=%d\n", session->name, *val);
3682 + break;
3683 +
3684 + case PPPOL2TP_SO_LNSMODE:
3685 + *val = session->lns_mode;
3686 +- l2tp_info(session, PPPOL2TP_MSG_CONTROL,
3687 ++ l2tp_info(session, L2TP_MSG_CONTROL,
3688 + "%s: get lns_mode=%d\n", session->name, *val);
3689 + break;
3690 +
3691 + case PPPOL2TP_SO_DEBUG:
3692 + *val = session->debug;
3693 +- l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get debug=%d\n",
3694 ++ l2tp_info(session, L2TP_MSG_CONTROL, "%s: get debug=%d\n",
3695 + session->name, *val);
3696 + break;
3697 +
3698 + case PPPOL2TP_SO_REORDERTO:
3699 + *val = (int) jiffies_to_msecs(session->reorder_timeout);
3700 +- l2tp_info(session, PPPOL2TP_MSG_CONTROL,
3701 ++ l2tp_info(session, L2TP_MSG_CONTROL,
3702 + "%s: get reorder_timeout=%d\n", session->name, *val);
3703 + break;
3704 +
3705 +@@ -1653,8 +1702,9 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
3706 + {
3707 + struct l2tp_session *session = v;
3708 + struct l2tp_tunnel *tunnel = session->tunnel;
3709 +- struct pppol2tp_session *ps = l2tp_session_priv(session);
3710 +- struct pppox_sock *po = pppox_sk(ps->sock);
3711 ++ unsigned char state;
3712 ++ char user_data_ok;
3713 ++ struct sock *sk;
3714 + u32 ip = 0;
3715 + u16 port = 0;
3716 +
3717 +@@ -1664,6 +1714,15 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
3718 + port = ntohs(inet->inet_sport);
3719 + }
3720 +
3721 ++ sk = pppol2tp_session_get_sock(session);
3722 ++ if (sk) {
3723 ++ state = sk->sk_state;
3724 ++ user_data_ok = (session == sk->sk_user_data) ? 'Y' : 'N';
3725 ++ } else {
3726 ++ state = 0;
3727 ++ user_data_ok = 'N';
3728 ++ }
3729 ++
3730 + seq_printf(m, " SESSION '%s' %08X/%d %04X/%04X -> "
3731 + "%04X/%04X %d %c\n",
3732 + session->name, ip, port,
3733 +@@ -1671,9 +1730,7 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
3734 + session->session_id,
3735 + tunnel->peer_tunnel_id,
3736 + session->peer_session_id,
3737 +- ps->sock->sk_state,
3738 +- (session == ps->sock->sk_user_data) ?
3739 +- 'Y' : 'N');
3740 ++ state, user_data_ok);
3741 + seq_printf(m, " %d/%d/%c/%c/%s %08x %u\n",
3742 + session->mtu, session->mru,
3743 + session->recv_seq ? 'R' : '-',
3744 +@@ -1690,8 +1747,12 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
3745 + atomic_long_read(&session->stats.rx_bytes),
3746 + atomic_long_read(&session->stats.rx_errors));
3747 +
3748 +- if (po)
3749 ++ if (sk) {
3750 ++ struct pppox_sock *po = pppox_sk(sk);
3751 ++
3752 + seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan));
3753 ++ sock_put(sk);
3754 ++ }
3755 + }
3756 +
3757 + static int pppol2tp_seq_show(struct seq_file *m, void *v)
3758 +diff --git a/net/socket.c b/net/socket.c
3759 +index 15bdba4211ad..88086d18c208 100644
3760 +--- a/net/socket.c
3761 ++++ b/net/socket.c
3762 +@@ -3304,3 +3304,49 @@ int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how)
3763 + return sock->ops->shutdown(sock, how);
3764 + }
3765 + EXPORT_SYMBOL(kernel_sock_shutdown);
3766 ++
3767 ++/* This routine returns the IP overhead imposed by a socket i.e.
3768 ++ * the length of the underlying IP header, depending on whether
3769 ++ * this is an IPv4 or IPv6 socket and the length from IP options turned
3770 ++ * on at the socket. Assumes that the caller has a lock on the socket.
3771 ++ */
3772 ++u32 kernel_sock_ip_overhead(struct sock *sk)
3773 ++{
3774 ++ struct inet_sock *inet;
3775 ++ struct ip_options_rcu *opt;
3776 ++ u32 overhead = 0;
3777 ++ bool owned_by_user;
3778 ++#if IS_ENABLED(CONFIG_IPV6)
3779 ++ struct ipv6_pinfo *np;
3780 ++ struct ipv6_txoptions *optv6 = NULL;
3781 ++#endif /* IS_ENABLED(CONFIG_IPV6) */
3782 ++
3783 ++ if (!sk)
3784 ++ return overhead;
3785 ++
3786 ++ owned_by_user = sock_owned_by_user(sk);
3787 ++ switch (sk->sk_family) {
3788 ++ case AF_INET:
3789 ++ inet = inet_sk(sk);
3790 ++ overhead += sizeof(struct iphdr);
3791 ++ opt = rcu_dereference_protected(inet->inet_opt,
3792 ++ owned_by_user);
3793 ++ if (opt)
3794 ++ overhead += opt->opt.optlen;
3795 ++ return overhead;
3796 ++#if IS_ENABLED(CONFIG_IPV6)
3797 ++ case AF_INET6:
3798 ++ np = inet6_sk(sk);
3799 ++ overhead += sizeof(struct ipv6hdr);
3800 ++ if (np)
3801 ++ optv6 = rcu_dereference_protected(np->opt,
3802 ++ owned_by_user);
3803 ++ if (optv6)
3804 ++ overhead += (optv6->opt_flen + optv6->opt_nflen);
3805 ++ return overhead;
3806 ++#endif /* IS_ENABLED(CONFIG_IPV6) */
3807 ++ default: /* Returns 0 overhead if the socket is not ipv4 or ipv6 */
3808 ++ return overhead;
3809 ++ }
3810 ++}
3811 ++EXPORT_SYMBOL(kernel_sock_ip_overhead);
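The new kernel_sock_ip_overhead() helper backs the series' L2TP MTU calculations: it returns the size of the IP header the socket will prepend, fixed header plus any per-socket options, and 0 for non-IP families. The userspace sketch below mirrors the computation with the standard header sizes; the option lengths here are illustrative inputs rather than values read from a live socket.

#include <stdio.h>
#include <sys/socket.h>

static unsigned int ip_overhead(int family, unsigned int optlen)
{
        switch (family) {
        case AF_INET:
                return 20 + optlen;  /* sizeof(struct iphdr) + IPv4 options */
        case AF_INET6:
                return 40 + optlen;  /* sizeof(struct ipv6hdr) + opt_flen + opt_nflen */
        default:
                return 0;            /* non-IP socket: no IP overhead */
        }
}

int main(void)
{
        printf("v4=%u v6=%u other=%u\n",
               ip_overhead(AF_INET, 0),
               ip_overhead(AF_INET6, 8),
               ip_overhead(AF_UNIX, 0));
        return 0;
}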
3812 +diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
3813 +index 461f8d891579..44352b0b7510 100644
3814 +--- a/security/integrity/evm/evm_crypto.c
3815 ++++ b/security/integrity/evm/evm_crypto.c
3816 +@@ -47,7 +47,7 @@ static struct shash_desc *init_desc(char type)
3817 + algo = evm_hash;
3818 + }
3819 +
3820 +- if (*tfm == NULL) {
3821 ++ if (IS_ERR_OR_NULL(*tfm)) {
3822 + mutex_lock(&mutex);
3823 + if (*tfm)
3824 + goto out;
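The EVM fix above is about error-pointer semantics: crypto_alloc_shash() reports failure as ERR_PTR(-err), not NULL, so a cached *tfm holding an error value would pass the old *tfm == NULL test and be handed to the hash API as if it were a valid transform. Checking IS_ERR_OR_NULL(*tfm) re-enters the slow path instead. The sketch below re-derives the ERR_PTR convention in plain C to show what each test accepts.

#include <stdio.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long err)
{
        return (void *)err;
}

static int IS_ERR(const void *p)
{
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static int IS_ERR_OR_NULL(const void *p)
{
        return !p || IS_ERR(p);
}

int main(void)
{
        void *tfm = ERR_PTR(-12);  /* a failed allocation left in the cache */

        printf("NULL test would retry:       %d\n", tfm == NULL);        /* 0 */
        printf("IS_ERR_OR_NULL would retry:  %d\n", IS_ERR_OR_NULL(tfm)); /* 1 */
        return 0;
}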
3825 +diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
3826 +index 950730709d28..ab8846e7e8ff 100644
3827 +--- a/sound/core/pcm_lib.c
3828 ++++ b/sound/core/pcm_lib.c
3829 +@@ -456,6 +456,7 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
3830 +
3831 + no_delta_check:
3832 + if (runtime->status->hw_ptr == new_hw_ptr) {
3833 ++ runtime->hw_ptr_jiffies = curr_jiffies;
3834 + update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
3835 + return 0;
3836 + }
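The sound/core change is a single assignment with a subtle purpose: snd_pcm_update_hw_ptr0() uses hw_ptr_jiffies as the time baseline for its pointer-consistency heuristics, and before this change the early return on an unchanged pointer left that baseline stale, inflating the apparent elapsed time at the next real update. A simplified model of the effect; the plausibility rule and the numbers are illustrative, not the driver's actual heuristics.

#include <stdio.h>

struct rt {
        unsigned long hw_ptr;
        unsigned long hw_ptr_jiffies;  /* when the position was last inspected */
};

/* illustrative rule: the pointer may advance at most RATE positions
 * per jiffy measured from the recorded baseline */
#define RATE 8

static const char *update(struct rt *rt, unsigned long new_ptr,
                          unsigned long now)
{
        if (rt->hw_ptr == new_ptr) {
                rt->hw_ptr_jiffies = now;  /* the fix: refresh the baseline */
                return "unchanged";
        }
        if (new_ptr - rt->hw_ptr > (now - rt->hw_ptr_jiffies) * RATE)
                return "rejected as implausible";
        rt->hw_ptr = new_ptr;
        rt->hw_ptr_jiffies = now;
        return "accepted";
}

int main(void)
{
        struct rt rt = { 0, 0 };

        update(&rt, 0, 1000);  /* long idle stretch, pointer unchanged */
        /* with the refresh, the next check sees 1 jiffy elapsed and a
         * 1000-position jump is caught; with a stale baseline it would
         * look like 1001 jiffies and slip through */
        printf("%s\n", update(&rt, 1000, 1001));
        return 0;
}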