
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.1 commit in: /
Date: Tue, 15 Dec 2015 11:17:08
Message-Id: 1450178210.56e69a84070c5284582a1a12b7670397aaa78c7e.mpagano@gentoo
1 commit: 56e69a84070c5284582a1a12b7670397aaa78c7e
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Tue Dec 15 11:16:50 2015 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Tue Dec 15 11:16:50 2015 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=56e69a84
7
8 Linux patch 4.1.15
9
10 0000_README | 4 +
11 1014_linux-4.1.15.patch | 2619 +++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 2623 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index bb7a9d9..2294ce5 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -99,6 +99,10 @@ Patch: 1013_linux-4.1.14.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.1.14
21
22 +Patch: 1014_linux-4.1.15.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.1.15
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1014_linux-4.1.15.patch b/1014_linux-4.1.15.patch
31 new file mode 100644
32 index 0000000..114781b
33 --- /dev/null
34 +++ b/1014_linux-4.1.15.patch
35 @@ -0,0 +1,2619 @@
36 +diff --git a/Makefile b/Makefile
37 +index 091280d66452..cf35f6bcffd8 100644
38 +--- a/Makefile
39 ++++ b/Makefile
40 +@@ -1,6 +1,6 @@
41 + VERSION = 4
42 + PATCHLEVEL = 1
43 +-SUBLEVEL = 14
44 ++SUBLEVEL = 15
45 + EXTRAVERSION =
46 + NAME = Series 4800
47 +
48 +diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
49 +index 1ec6441fe2a5..09138ceba046 100644
50 +--- a/drivers/block/rbd.c
51 ++++ b/drivers/block/rbd.c
52 +@@ -3417,6 +3417,7 @@ static void rbd_queue_workfn(struct work_struct *work)
53 + goto err_rq;
54 + }
55 + img_request->rq = rq;
56 ++ snapc = NULL; /* img_request consumes a ref */
57 +
58 + if (op_type == OBJ_OP_DISCARD)
59 + result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA,
60 +diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
61 +index f51d376d10ba..c2f5117fd8cb 100644
62 +--- a/drivers/firewire/ohci.c
63 ++++ b/drivers/firewire/ohci.c
64 +@@ -3675,6 +3675,11 @@ static int pci_probe(struct pci_dev *dev,
65 +
66 + reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
67 + ohci->it_context_support = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
68 ++ /* JMicron JMB38x often shows 0 at first read, just ignore it */
69 ++ if (!ohci->it_context_support) {
70 ++ ohci_notice(ohci, "overriding IsoXmitIntMask\n");
71 ++ ohci->it_context_support = 0xf;
72 ++ }
73 + reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
74 + ohci->it_context_mask = ohci->it_context_support;
75 + ohci->n_it = hweight32(ohci->it_context_mask);
76 +diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
77 +index 9c71295f2fef..85e640440bd9 100644
78 +--- a/drivers/net/phy/broadcom.c
79 ++++ b/drivers/net/phy/broadcom.c
80 +@@ -675,7 +675,7 @@ static struct mdio_device_id __maybe_unused broadcom_tbl[] = {
81 + { PHY_ID_BCM5461, 0xfffffff0 },
82 + { PHY_ID_BCM54616S, 0xfffffff0 },
83 + { PHY_ID_BCM5464, 0xfffffff0 },
84 +- { PHY_ID_BCM5482, 0xfffffff0 },
85 ++ { PHY_ID_BCM5481, 0xfffffff0 },
86 + { PHY_ID_BCM5482, 0xfffffff0 },
87 + { PHY_ID_BCM50610, 0xfffffff0 },
88 + { PHY_ID_BCM50610M, 0xfffffff0 },
89 +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
90 +index 4e0470d396a3..71190dc1eacf 100644
91 +--- a/drivers/net/usb/qmi_wwan.c
92 ++++ b/drivers/net/usb/qmi_wwan.c
93 +@@ -774,6 +774,7 @@ static const struct usb_device_id products[] = {
94 + {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
95 + {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
96 + {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */
97 ++ {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */
98 + {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */
99 + {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */
100 + {QMI_FIXED_INTF(0x0b3c, 0xc002, 4)}, /* Olivetti Olicard 140 */
101 +diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
102 +index b072e17479aa..2b0d84d32db4 100644
103 +--- a/fs/btrfs/file.c
104 ++++ b/fs/btrfs/file.c
105 +@@ -756,8 +756,16 @@ next_slot:
106 + }
107 +
108 + btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
109 +- if (key.objectid > ino ||
110 +- key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
111 ++
112 ++ if (key.objectid > ino)
113 ++ break;
114 ++ if (WARN_ON_ONCE(key.objectid < ino) ||
115 ++ key.type < BTRFS_EXTENT_DATA_KEY) {
116 ++ ASSERT(del_nr == 0);
117 ++ path->slots[0]++;
118 ++ goto next_slot;
119 ++ }
120 ++ if (key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
121 + break;
122 +
123 + fi = btrfs_item_ptr(leaf, path->slots[0],
124 +@@ -776,8 +784,8 @@ next_slot:
125 + btrfs_file_extent_inline_len(leaf,
126 + path->slots[0], fi);
127 + } else {
128 +- WARN_ON(1);
129 +- extent_end = search_start;
130 ++ /* can't happen */
131 ++ BUG();
132 + }
133 +
134 + /*
135 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
136 +index e3b39f0c4666..5136c73b3dce 100644
137 +--- a/fs/btrfs/inode.c
138 ++++ b/fs/btrfs/inode.c
139 +@@ -1294,8 +1294,14 @@ next_slot:
140 + num_bytes = 0;
141 + btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
142 +
143 +- if (found_key.objectid > ino ||
144 +- found_key.type > BTRFS_EXTENT_DATA_KEY ||
145 ++ if (found_key.objectid > ino)
146 ++ break;
147 ++ if (WARN_ON_ONCE(found_key.objectid < ino) ||
148 ++ found_key.type < BTRFS_EXTENT_DATA_KEY) {
149 ++ path->slots[0]++;
150 ++ goto next_slot;
151 ++ }
152 ++ if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
153 + found_key.offset > end)
154 + break;
155 +
156 +@@ -4184,6 +4190,47 @@ static int truncate_space_check(struct btrfs_trans_handle *trans,
157 +
158 + }
159 +
160 ++static int truncate_inline_extent(struct inode *inode,
161 ++ struct btrfs_path *path,
162 ++ struct btrfs_key *found_key,
163 ++ const u64 item_end,
164 ++ const u64 new_size)
165 ++{
166 ++ struct extent_buffer *leaf = path->nodes[0];
167 ++ int slot = path->slots[0];
168 ++ struct btrfs_file_extent_item *fi;
169 ++ u32 size = (u32)(new_size - found_key->offset);
170 ++ struct btrfs_root *root = BTRFS_I(inode)->root;
171 ++
172 ++ fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
173 ++
174 ++ if (btrfs_file_extent_compression(leaf, fi) != BTRFS_COMPRESS_NONE) {
175 ++ loff_t offset = new_size;
176 ++ loff_t page_end = ALIGN(offset, PAGE_CACHE_SIZE);
177 ++
178 ++ /*
179 ++ * Zero out the remaining of the last page of our inline extent,
180 ++ * instead of directly truncating our inline extent here - that
181 ++ * would be much more complex (decompressing all the data, then
182 ++ * compressing the truncated data, which might be bigger than
183 ++ * the size of the inline extent, resize the extent, etc).
184 ++ * We release the path because to get the page we might need to
185 ++ * read the extent item from disk (data not in the page cache).
186 ++ */
187 ++ btrfs_release_path(path);
188 ++ return btrfs_truncate_page(inode, offset, page_end - offset, 0);
189 ++ }
190 ++
191 ++ btrfs_set_file_extent_ram_bytes(leaf, fi, size);
192 ++ size = btrfs_file_extent_calc_inline_size(size);
193 ++ btrfs_truncate_item(root, path, size, 1);
194 ++
195 ++ if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
196 ++ inode_sub_bytes(inode, item_end + 1 - new_size);
197 ++
198 ++ return 0;
199 ++}
200 ++
201 + /*
202 + * this can truncate away extent items, csum items and directory items.
203 + * It starts at a high offset and removes keys until it can't find
204 +@@ -4378,27 +4425,40 @@ search_again:
205 + * special encodings
206 + */
207 + if (!del_item &&
208 +- btrfs_file_extent_compression(leaf, fi) == 0 &&
209 + btrfs_file_extent_encryption(leaf, fi) == 0 &&
210 + btrfs_file_extent_other_encoding(leaf, fi) == 0) {
211 +- u32 size = new_size - found_key.offset;
212 +-
213 +- if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
214 +- inode_sub_bytes(inode, item_end + 1 -
215 +- new_size);
216 +
217 + /*
218 +- * update the ram bytes to properly reflect
219 +- * the new size of our item
220 ++ * Need to release path in order to truncate a
221 ++ * compressed extent. So delete any accumulated
222 ++ * extent items so far.
223 + */
224 +- btrfs_set_file_extent_ram_bytes(leaf, fi, size);
225 +- size =
226 +- btrfs_file_extent_calc_inline_size(size);
227 +- btrfs_truncate_item(root, path, size, 1);
228 ++ if (btrfs_file_extent_compression(leaf, fi) !=
229 ++ BTRFS_COMPRESS_NONE && pending_del_nr) {
230 ++ err = btrfs_del_items(trans, root, path,
231 ++ pending_del_slot,
232 ++ pending_del_nr);
233 ++ if (err) {
234 ++ btrfs_abort_transaction(trans,
235 ++ root,
236 ++ err);
237 ++ goto error;
238 ++ }
239 ++ pending_del_nr = 0;
240 ++ }
241 ++
242 ++ err = truncate_inline_extent(inode, path,
243 ++ &found_key,
244 ++ item_end,
245 ++ new_size);
246 ++ if (err) {
247 ++ btrfs_abort_transaction(trans,
248 ++ root, err);
249 ++ goto error;
250 ++ }
251 + } else if (test_bit(BTRFS_ROOT_REF_COWS,
252 + &root->state)) {
253 +- inode_sub_bytes(inode, item_end + 1 -
254 +- found_key.offset);
255 ++ inode_sub_bytes(inode, item_end + 1 - new_size);
256 + }
257 + }
258 + delete:
259 +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
260 +index 8b2c82ce36b3..87c720865ebf 100644
261 +--- a/fs/btrfs/ioctl.c
262 ++++ b/fs/btrfs/ioctl.c
263 +@@ -3166,6 +3166,150 @@ static void clone_update_extent_map(struct inode *inode,
264 + &BTRFS_I(inode)->runtime_flags);
265 + }
266 +
267 ++/*
268 ++ * Make sure we do not end up inserting an inline extent into a file that has
269 ++ * already other (non-inline) extents. If a file has an inline extent it can
270 ++ * not have any other extents and the (single) inline extent must start at the
271 ++ * file offset 0. Failing to respect these rules will lead to file corruption,
272 ++ * resulting in EIO errors on read/write operations, hitting BUG_ON's in mm, etc
273 ++ *
274 ++ * We can have extents that have been already written to disk or we can have
275 ++ * dirty ranges still in delalloc, in which case the extent maps and items are
276 ++ * created only when we run delalloc, and the delalloc ranges might fall outside
277 ++ * the range we are currently locking in the inode's io tree. So we check the
278 ++ * inode's i_size because of that (i_size updates are done while holding the
279 ++ * i_mutex, which we are holding here).
280 ++ * We also check to see if the inode has a size not greater than "datal" but has
281 ++ * extents beyond it, due to an fallocate with FALLOC_FL_KEEP_SIZE (and we are
282 ++ * protected against such concurrent fallocate calls by the i_mutex).
283 ++ *
284 ++ * If the file has no extents but a size greater than datal, do not allow the
285 ++ * copy because we would need turn the inline extent into a non-inline one (even
286 ++ * with NO_HOLES enabled). If we find our destination inode only has one inline
287 ++ * extent, just overwrite it with the source inline extent if its size is less
288 ++ * than the source extent's size, or we could copy the source inline extent's
289 ++ * data into the destination inode's inline extent if the later is greater then
290 ++ * the former.
291 ++ */
292 ++static int clone_copy_inline_extent(struct inode *src,
293 ++ struct inode *dst,
294 ++ struct btrfs_trans_handle *trans,
295 ++ struct btrfs_path *path,
296 ++ struct btrfs_key *new_key,
297 ++ const u64 drop_start,
298 ++ const u64 datal,
299 ++ const u64 skip,
300 ++ const u64 size,
301 ++ char *inline_data)
302 ++{
303 ++ struct btrfs_root *root = BTRFS_I(dst)->root;
304 ++ const u64 aligned_end = ALIGN(new_key->offset + datal,
305 ++ root->sectorsize);
306 ++ int ret;
307 ++ struct btrfs_key key;
308 ++
309 ++ if (new_key->offset > 0)
310 ++ return -EOPNOTSUPP;
311 ++
312 ++ key.objectid = btrfs_ino(dst);
313 ++ key.type = BTRFS_EXTENT_DATA_KEY;
314 ++ key.offset = 0;
315 ++ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
316 ++ if (ret < 0) {
317 ++ return ret;
318 ++ } else if (ret > 0) {
319 ++ if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
320 ++ ret = btrfs_next_leaf(root, path);
321 ++ if (ret < 0)
322 ++ return ret;
323 ++ else if (ret > 0)
324 ++ goto copy_inline_extent;
325 ++ }
326 ++ btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
327 ++ if (key.objectid == btrfs_ino(dst) &&
328 ++ key.type == BTRFS_EXTENT_DATA_KEY) {
329 ++ ASSERT(key.offset > 0);
330 ++ return -EOPNOTSUPP;
331 ++ }
332 ++ } else if (i_size_read(dst) <= datal) {
333 ++ struct btrfs_file_extent_item *ei;
334 ++ u64 ext_len;
335 ++
336 ++ /*
337 ++ * If the file size is <= datal, make sure there are no other
338 ++ * extents following (can happen do to an fallocate call with
339 ++ * the flag FALLOC_FL_KEEP_SIZE).
340 ++ */
341 ++ ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
342 ++ struct btrfs_file_extent_item);
343 ++ /*
344 ++ * If it's an inline extent, it can not have other extents
345 ++ * following it.
346 ++ */
347 ++ if (btrfs_file_extent_type(path->nodes[0], ei) ==
348 ++ BTRFS_FILE_EXTENT_INLINE)
349 ++ goto copy_inline_extent;
350 ++
351 ++ ext_len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
352 ++ if (ext_len > aligned_end)
353 ++ return -EOPNOTSUPP;
354 ++
355 ++ ret = btrfs_next_item(root, path);
356 ++ if (ret < 0) {
357 ++ return ret;
358 ++ } else if (ret == 0) {
359 ++ btrfs_item_key_to_cpu(path->nodes[0], &key,
360 ++ path->slots[0]);
361 ++ if (key.objectid == btrfs_ino(dst) &&
362 ++ key.type == BTRFS_EXTENT_DATA_KEY)
363 ++ return -EOPNOTSUPP;
364 ++ }
365 ++ }
366 ++
367 ++copy_inline_extent:
368 ++ /*
369 ++ * We have no extent items, or we have an extent at offset 0 which may
370 ++ * or may not be inlined. All these cases are dealt the same way.
371 ++ */
372 ++ if (i_size_read(dst) > datal) {
373 ++ /*
374 ++ * If the destination inode has an inline extent...
375 ++ * This would require copying the data from the source inline
376 ++ * extent into the beginning of the destination's inline extent.
377 ++ * But this is really complex, both extents can be compressed
378 ++ * or just one of them, which would require decompressing and
379 ++ * re-compressing data (which could increase the new compressed
380 ++ * size, not allowing the compressed data to fit anymore in an
381 ++ * inline extent).
382 ++ * So just don't support this case for now (it should be rare,
383 ++ * we are not really saving space when cloning inline extents).
384 ++ */
385 ++ return -EOPNOTSUPP;
386 ++ }
387 ++
388 ++ btrfs_release_path(path);
389 ++ ret = btrfs_drop_extents(trans, root, dst, drop_start, aligned_end, 1);
390 ++ if (ret)
391 ++ return ret;
392 ++ ret = btrfs_insert_empty_item(trans, root, path, new_key, size);
393 ++ if (ret)
394 ++ return ret;
395 ++
396 ++ if (skip) {
397 ++ const u32 start = btrfs_file_extent_calc_inline_size(0);
398 ++
399 ++ memmove(inline_data + start, inline_data + start + skip, datal);
400 ++ }
401 ++
402 ++ write_extent_buffer(path->nodes[0], inline_data,
403 ++ btrfs_item_ptr_offset(path->nodes[0],
404 ++ path->slots[0]),
405 ++ size);
406 ++ inode_add_bytes(dst, datal);
407 ++
408 ++ return 0;
409 ++}
410 ++
411 + /**
412 + * btrfs_clone() - clone a range from inode file to another
413 + *
414 +@@ -3432,21 +3576,6 @@ process_slot:
415 + } else if (type == BTRFS_FILE_EXTENT_INLINE) {
416 + u64 skip = 0;
417 + u64 trim = 0;
418 +- u64 aligned_end = 0;
419 +-
420 +- /*
421 +- * Don't copy an inline extent into an offset
422 +- * greater than zero. Having an inline extent
423 +- * at such an offset results in chaos as btrfs
424 +- * isn't prepared for such cases. Just skip
425 +- * this case for the same reasons as commented
426 +- * at btrfs_ioctl_clone().
427 +- */
428 +- if (last_dest_end > 0) {
429 +- ret = -EOPNOTSUPP;
430 +- btrfs_end_transaction(trans, root);
431 +- goto out;
432 +- }
433 +
434 + if (off > key.offset) {
435 + skip = off - key.offset;
436 +@@ -3464,42 +3593,22 @@ process_slot:
437 + size -= skip + trim;
438 + datal -= skip + trim;
439 +
440 +- aligned_end = ALIGN(new_key.offset + datal,
441 +- root->sectorsize);
442 +- ret = btrfs_drop_extents(trans, root, inode,
443 +- drop_start,
444 +- aligned_end,
445 +- 1);
446 ++ ret = clone_copy_inline_extent(src, inode,
447 ++ trans, path,
448 ++ &new_key,
449 ++ drop_start,
450 ++ datal,
451 ++ skip, size, buf);
452 + if (ret) {
453 + if (ret != -EOPNOTSUPP)
454 + btrfs_abort_transaction(trans,
455 +- root, ret);
456 +- btrfs_end_transaction(trans, root);
457 +- goto out;
458 +- }
459 +-
460 +- ret = btrfs_insert_empty_item(trans, root, path,
461 +- &new_key, size);
462 +- if (ret) {
463 +- btrfs_abort_transaction(trans, root,
464 +- ret);
465 ++ root,
466 ++ ret);
467 + btrfs_end_transaction(trans, root);
468 + goto out;
469 + }
470 +-
471 +- if (skip) {
472 +- u32 start =
473 +- btrfs_file_extent_calc_inline_size(0);
474 +- memmove(buf+start, buf+start+skip,
475 +- datal);
476 +- }
477 +-
478 + leaf = path->nodes[0];
479 + slot = path->slots[0];
480 +- write_extent_buffer(leaf, buf,
481 +- btrfs_item_ptr_offset(leaf, slot),
482 +- size);
483 +- inode_add_bytes(inode, datal);
484 + }
485 +
486 + /* If we have an implicit hole (NO_HOLES feature). */
487 +diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
488 +index 6f518c90e1c1..1fcd7b6e7564 100644
489 +--- a/fs/btrfs/xattr.c
490 ++++ b/fs/btrfs/xattr.c
491 +@@ -313,8 +313,10 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
492 + /* check to make sure this item is what we want */
493 + if (found_key.objectid != key.objectid)
494 + break;
495 +- if (found_key.type != BTRFS_XATTR_ITEM_KEY)
496 ++ if (found_key.type > BTRFS_XATTR_ITEM_KEY)
497 + break;
498 ++ if (found_key.type < BTRFS_XATTR_ITEM_KEY)
499 ++ goto next;
500 +
501 + di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
502 + if (verify_dir_item(root, leaf, di))
503 +diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
504 +index 84f37f34f9aa..1e99b29650a9 100644
505 +--- a/fs/ceph/mds_client.c
506 ++++ b/fs/ceph/mds_client.c
507 +@@ -1905,7 +1905,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
508 +
509 + len = sizeof(*head) +
510 + pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)) +
511 +- sizeof(struct timespec);
512 ++ sizeof(struct ceph_timespec);
513 +
514 + /* calculate (max) length for cap releases */
515 + len += sizeof(struct ceph_mds_request_release) *
516 +diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
517 +index 12756040ca20..8bec8f1e4b31 100644
518 +--- a/fs/debugfs/inode.c
519 ++++ b/fs/debugfs/inode.c
520 +@@ -276,8 +276,12 @@ static struct dentry *start_creating(const char *name, struct dentry *parent)
521 + dput(dentry);
522 + dentry = ERR_PTR(-EEXIST);
523 + }
524 +- if (IS_ERR(dentry))
525 ++
526 ++ if (IS_ERR(dentry)) {
527 + mutex_unlock(&d_inode(parent)->i_mutex);
528 ++ simple_release_fs(&debugfs_mount, &debugfs_mount_count);
529 ++ }
530 ++
531 + return dentry;
532 + }
533 +
534 +diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
535 +index d41843181818..e770c1ee4613 100644
536 +--- a/fs/ext4/ext4_jbd2.c
537 ++++ b/fs/ext4/ext4_jbd2.c
538 +@@ -88,13 +88,13 @@ int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle)
539 + return 0;
540 + }
541 +
542 ++ err = handle->h_err;
543 + if (!handle->h_transaction) {
544 +- err = jbd2_journal_stop(handle);
545 +- return handle->h_err ? handle->h_err : err;
546 ++ rc = jbd2_journal_stop(handle);
547 ++ return err ? err : rc;
548 + }
549 +
550 + sb = handle->h_transaction->t_journal->j_private;
551 +- err = handle->h_err;
552 + rc = jbd2_journal_stop(handle);
553 +
554 + if (!err)
555 +diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
556 +index 5765f88b3904..8082565c59a9 100644
557 +--- a/fs/ext4/page-io.c
558 ++++ b/fs/ext4/page-io.c
559 +@@ -426,6 +426,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
560 + struct buffer_head *bh, *head;
561 + int ret = 0;
562 + int nr_submitted = 0;
563 ++ int nr_to_submit = 0;
564 +
565 + blocksize = 1 << inode->i_blkbits;
566 +
567 +@@ -478,11 +479,13 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
568 + unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
569 + }
570 + set_buffer_async_write(bh);
571 ++ nr_to_submit++;
572 + } while ((bh = bh->b_this_page) != head);
573 +
574 + bh = head = page_buffers(page);
575 +
576 +- if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
577 ++ if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode) &&
578 ++ nr_to_submit) {
579 + data_page = ext4_encrypt(inode, page);
580 + if (IS_ERR(data_page)) {
581 + ret = PTR_ERR(data_page);
582 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
583 +index ff89971e3ee0..8a3b9f14d198 100644
584 +--- a/fs/ext4/super.c
585 ++++ b/fs/ext4/super.c
586 +@@ -396,9 +396,13 @@ static void ext4_handle_error(struct super_block *sb)
587 + smp_wmb();
588 + sb->s_flags |= MS_RDONLY;
589 + }
590 +- if (test_opt(sb, ERRORS_PANIC))
591 ++ if (test_opt(sb, ERRORS_PANIC)) {
592 ++ if (EXT4_SB(sb)->s_journal &&
593 ++ !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
594 ++ return;
595 + panic("EXT4-fs (device %s): panic forced after error\n",
596 + sb->s_id);
597 ++ }
598 + }
599 +
600 + #define ext4_error_ratelimit(sb) \
601 +@@ -587,8 +591,12 @@ void __ext4_abort(struct super_block *sb, const char *function,
602 + jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
603 + save_error_info(sb, function, line);
604 + }
605 +- if (test_opt(sb, ERRORS_PANIC))
606 ++ if (test_opt(sb, ERRORS_PANIC)) {
607 ++ if (EXT4_SB(sb)->s_journal &&
608 ++ !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
609 ++ return;
610 + panic("EXT4-fs panic from previous error\n");
611 ++ }
612 + }
613 +
614 + void __ext4_msg(struct super_block *sb,
615 +diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
616 +index 7003c0925760..0469f32918a5 100644
617 +--- a/fs/jbd2/journal.c
618 ++++ b/fs/jbd2/journal.c
619 +@@ -2086,8 +2086,12 @@ static void __journal_abort_soft (journal_t *journal, int errno)
620 +
621 + __jbd2_journal_abort_hard(journal);
622 +
623 +- if (errno)
624 ++ if (errno) {
625 + jbd2_journal_update_sb_errno(journal);
626 ++ write_lock(&journal->j_state_lock);
627 ++ journal->j_flags |= JBD2_REC_ERR;
628 ++ write_unlock(&journal->j_state_lock);
629 ++ }
630 + }
631 +
632 + /**
633 +diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
634 +index 976ba792fbc6..7f22b6c6fb50 100644
635 +--- a/fs/nfs/inode.c
636 ++++ b/fs/nfs/inode.c
637 +@@ -1813,7 +1813,11 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
638 + if ((long)fattr->gencount - (long)nfsi->attr_gencount > 0)
639 + nfsi->attr_gencount = fattr->gencount;
640 + }
641 +- invalid &= ~NFS_INO_INVALID_ATTR;
642 ++
643 ++ /* Don't declare attrcache up to date if there were no attrs! */
644 ++ if (fattr->valid != 0)
645 ++ invalid &= ~NFS_INO_INVALID_ATTR;
646 ++
647 + /* Don't invalidate the data if we were to blame */
648 + if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)
649 + || S_ISLNK(inode->i_mode)))
650 +diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
651 +index e42be52a8c18..5dea913baf46 100644
652 +--- a/fs/nfs/nfs4client.c
653 ++++ b/fs/nfs/nfs4client.c
654 +@@ -33,7 +33,7 @@ static int nfs_get_cb_ident_idr(struct nfs_client *clp, int minorversion)
655 + return ret;
656 + idr_preload(GFP_KERNEL);
657 + spin_lock(&nn->nfs_client_lock);
658 +- ret = idr_alloc(&nn->cb_ident_idr, clp, 0, 0, GFP_NOWAIT);
659 ++ ret = idr_alloc(&nn->cb_ident_idr, clp, 1, 0, GFP_NOWAIT);
660 + if (ret >= 0)
661 + clp->cl_cb_ident = ret;
662 + spin_unlock(&nn->nfs_client_lock);
663 +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
664 +index 397798368b1a..bb6c324f1f3d 100644
665 +--- a/fs/nfsd/nfs4state.c
666 ++++ b/fs/nfsd/nfs4state.c
667 +@@ -765,16 +765,68 @@ void nfs4_unhash_stid(struct nfs4_stid *s)
668 + s->sc_type = 0;
669 + }
670 +
671 +-static void
672 ++/**
673 ++ * nfs4_get_existing_delegation - Discover if this delegation already exists
674 ++ * @clp: a pointer to the nfs4_client we're granting a delegation to
675 ++ * @fp: a pointer to the nfs4_file we're granting a delegation on
676 ++ *
677 ++ * Return:
678 ++ * On success: NULL if an existing delegation was not found.
679 ++ *
680 ++ * On error: -EAGAIN if one was previously granted to this nfs4_client
681 ++ * for this nfs4_file.
682 ++ *
683 ++ */
684 ++
685 ++static int
686 ++nfs4_get_existing_delegation(struct nfs4_client *clp, struct nfs4_file *fp)
687 ++{
688 ++ struct nfs4_delegation *searchdp = NULL;
689 ++ struct nfs4_client *searchclp = NULL;
690 ++
691 ++ lockdep_assert_held(&state_lock);
692 ++ lockdep_assert_held(&fp->fi_lock);
693 ++
694 ++ list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
695 ++ searchclp = searchdp->dl_stid.sc_client;
696 ++ if (clp == searchclp) {
697 ++ return -EAGAIN;
698 ++ }
699 ++ }
700 ++ return 0;
701 ++}
702 ++
703 ++/**
704 ++ * hash_delegation_locked - Add a delegation to the appropriate lists
705 ++ * @dp: a pointer to the nfs4_delegation we are adding.
706 ++ * @fp: a pointer to the nfs4_file we're granting a delegation on
707 ++ *
708 ++ * Return:
709 ++ * On success: NULL if the delegation was successfully hashed.
710 ++ *
711 ++ * On error: -EAGAIN if one was previously granted to this
712 ++ * nfs4_client for this nfs4_file. Delegation is not hashed.
713 ++ *
714 ++ */
715 ++
716 ++static int
717 + hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
718 + {
719 ++ int status;
720 ++ struct nfs4_client *clp = dp->dl_stid.sc_client;
721 ++
722 + lockdep_assert_held(&state_lock);
723 + lockdep_assert_held(&fp->fi_lock);
724 +
725 ++ status = nfs4_get_existing_delegation(clp, fp);
726 ++ if (status)
727 ++ return status;
728 ++ ++fp->fi_delegees;
729 + atomic_inc(&dp->dl_stid.sc_count);
730 + dp->dl_stid.sc_type = NFS4_DELEG_STID;
731 + list_add(&dp->dl_perfile, &fp->fi_delegations);
732 +- list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
733 ++ list_add(&dp->dl_perclnt, &clp->cl_delegations);
734 ++ return 0;
735 + }
736 +
737 + static bool
738 +@@ -3351,6 +3403,7 @@ static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp,
739 + stp->st_access_bmap = 0;
740 + stp->st_deny_bmap = 0;
741 + stp->st_openstp = NULL;
742 ++ init_rwsem(&stp->st_rwsem);
743 + spin_lock(&oo->oo_owner.so_client->cl_lock);
744 + list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
745 + spin_lock(&fp->fi_lock);
746 +@@ -3940,6 +3993,18 @@ static struct file_lock *nfs4_alloc_init_lease(struct nfs4_file *fp, int flag)
747 + return fl;
748 + }
749 +
750 ++/**
751 ++ * nfs4_setlease - Obtain a delegation by requesting lease from vfs layer
752 ++ * @dp: a pointer to the nfs4_delegation we're adding.
753 ++ *
754 ++ * Return:
755 ++ * On success: Return code will be 0 on success.
756 ++ *
757 ++ * On error: -EAGAIN if there was an existing delegation.
758 ++ * nonzero if there is an error in other cases.
759 ++ *
760 ++ */
761 ++
762 + static int nfs4_setlease(struct nfs4_delegation *dp)
763 + {
764 + struct nfs4_file *fp = dp->dl_stid.sc_file;
765 +@@ -3971,16 +4036,19 @@ static int nfs4_setlease(struct nfs4_delegation *dp)
766 + goto out_unlock;
767 + /* Race breaker */
768 + if (fp->fi_deleg_file) {
769 +- status = 0;
770 +- ++fp->fi_delegees;
771 +- hash_delegation_locked(dp, fp);
772 ++ status = hash_delegation_locked(dp, fp);
773 + goto out_unlock;
774 + }
775 + fp->fi_deleg_file = filp;
776 +- fp->fi_delegees = 1;
777 +- hash_delegation_locked(dp, fp);
778 ++ fp->fi_delegees = 0;
779 ++ status = hash_delegation_locked(dp, fp);
780 + spin_unlock(&fp->fi_lock);
781 + spin_unlock(&state_lock);
782 ++ if (status) {
783 ++ /* Should never happen, this is a new fi_deleg_file */
784 ++ WARN_ON_ONCE(1);
785 ++ goto out_fput;
786 ++ }
787 + return 0;
788 + out_unlock:
789 + spin_unlock(&fp->fi_lock);
790 +@@ -4000,6 +4068,15 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
791 + if (fp->fi_had_conflict)
792 + return ERR_PTR(-EAGAIN);
793 +
794 ++ spin_lock(&state_lock);
795 ++ spin_lock(&fp->fi_lock);
796 ++ status = nfs4_get_existing_delegation(clp, fp);
797 ++ spin_unlock(&fp->fi_lock);
798 ++ spin_unlock(&state_lock);
799 ++
800 ++ if (status)
801 ++ return ERR_PTR(status);
802 ++
803 + dp = alloc_init_deleg(clp, fh, odstate);
804 + if (!dp)
805 + return ERR_PTR(-ENOMEM);
806 +@@ -4018,9 +4095,7 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
807 + status = -EAGAIN;
808 + goto out_unlock;
809 + }
810 +- ++fp->fi_delegees;
811 +- hash_delegation_locked(dp, fp);
812 +- status = 0;
813 ++ status = hash_delegation_locked(dp, fp);
814 + out_unlock:
815 + spin_unlock(&fp->fi_lock);
816 + spin_unlock(&state_lock);
817 +@@ -4181,15 +4256,20 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
818 + */
819 + if (stp) {
820 + /* Stateid was found, this is an OPEN upgrade */
821 ++ down_read(&stp->st_rwsem);
822 + status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
823 +- if (status)
824 ++ if (status) {
825 ++ up_read(&stp->st_rwsem);
826 + goto out;
827 ++ }
828 + } else {
829 + stp = open->op_stp;
830 + open->op_stp = NULL;
831 + init_open_stateid(stp, fp, open);
832 ++ down_read(&stp->st_rwsem);
833 + status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
834 + if (status) {
835 ++ up_read(&stp->st_rwsem);
836 + release_open_stateid(stp);
837 + goto out;
838 + }
839 +@@ -4201,6 +4281,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
840 + }
841 + update_stateid(&stp->st_stid.sc_stateid);
842 + memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
843 ++ up_read(&stp->st_rwsem);
844 +
845 + if (nfsd4_has_session(&resp->cstate)) {
846 + if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
847 +@@ -4777,10 +4858,13 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_
848 + * revoked delegations are kept only for free_stateid.
849 + */
850 + return nfserr_bad_stateid;
851 ++ down_write(&stp->st_rwsem);
852 + status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
853 +- if (status)
854 +- return status;
855 +- return nfs4_check_fh(current_fh, &stp->st_stid);
856 ++ if (status == nfs_ok)
857 ++ status = nfs4_check_fh(current_fh, &stp->st_stid);
858 ++ if (status != nfs_ok)
859 ++ up_write(&stp->st_rwsem);
860 ++ return status;
861 + }
862 +
863 + /*
864 +@@ -4827,6 +4911,7 @@ static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cs
865 + return status;
866 + oo = openowner(stp->st_stateowner);
867 + if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
868 ++ up_write(&stp->st_rwsem);
869 + nfs4_put_stid(&stp->st_stid);
870 + return nfserr_bad_stateid;
871 + }
872 +@@ -4857,11 +4942,14 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
873 + goto out;
874 + oo = openowner(stp->st_stateowner);
875 + status = nfserr_bad_stateid;
876 +- if (oo->oo_flags & NFS4_OO_CONFIRMED)
877 ++ if (oo->oo_flags & NFS4_OO_CONFIRMED) {
878 ++ up_write(&stp->st_rwsem);
879 + goto put_stateid;
880 ++ }
881 + oo->oo_flags |= NFS4_OO_CONFIRMED;
882 + update_stateid(&stp->st_stid.sc_stateid);
883 + memcpy(&oc->oc_resp_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
884 ++ up_write(&stp->st_rwsem);
885 + dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
886 + __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
887 +
888 +@@ -4940,6 +5028,7 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp,
889 + memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
890 + status = nfs_ok;
891 + put_stateid:
892 ++ up_write(&stp->st_rwsem);
893 + nfs4_put_stid(&stp->st_stid);
894 + out:
895 + nfsd4_bump_seqid(cstate, status);
896 +@@ -4993,6 +5082,7 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
897 + goto out;
898 + update_stateid(&stp->st_stid.sc_stateid);
899 + memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
900 ++ up_write(&stp->st_rwsem);
901 +
902 + nfsd4_close_open_stateid(stp);
903 +
904 +@@ -5223,6 +5313,7 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
905 + stp->st_access_bmap = 0;
906 + stp->st_deny_bmap = open_stp->st_deny_bmap;
907 + stp->st_openstp = open_stp;
908 ++ init_rwsem(&stp->st_rwsem);
909 + list_add(&stp->st_locks, &open_stp->st_locks);
910 + list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
911 + spin_lock(&fp->fi_lock);
912 +@@ -5391,6 +5482,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
913 + &open_stp, nn);
914 + if (status)
915 + goto out;
916 ++ up_write(&open_stp->st_rwsem);
917 + open_sop = openowner(open_stp->st_stateowner);
918 + status = nfserr_bad_stateid;
919 + if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
920 +@@ -5398,6 +5490,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
921 + goto out;
922 + status = lookup_or_create_lock_state(cstate, open_stp, lock,
923 + &lock_stp, &new);
924 ++ if (status == nfs_ok)
925 ++ down_write(&lock_stp->st_rwsem);
926 + } else {
927 + status = nfs4_preprocess_seqid_op(cstate,
928 + lock->lk_old_lock_seqid,
929 +@@ -5503,6 +5597,8 @@ out:
930 + seqid_mutating_err(ntohl(status)))
931 + lock_sop->lo_owner.so_seqid++;
932 +
933 ++ up_write(&lock_stp->st_rwsem);
934 ++
935 + /*
936 + * If this is a new, never-before-used stateid, and we are
937 + * returning an error, then just go ahead and release it.
938 +@@ -5673,6 +5769,7 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
939 + fput:
940 + fput(filp);
941 + put_stateid:
942 ++ up_write(&stp->st_rwsem);
943 + nfs4_put_stid(&stp->st_stid);
944 + out:
945 + nfsd4_bump_seqid(cstate, status);
946 +diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
947 +index dbc4f85a5008..67685b6cfef3 100644
948 +--- a/fs/nfsd/state.h
949 ++++ b/fs/nfsd/state.h
950 +@@ -533,15 +533,16 @@ struct nfs4_file {
951 + * Better suggestions welcome.
952 + */
953 + struct nfs4_ol_stateid {
954 +- struct nfs4_stid st_stid; /* must be first field */
955 +- struct list_head st_perfile;
956 +- struct list_head st_perstateowner;
957 +- struct list_head st_locks;
958 +- struct nfs4_stateowner * st_stateowner;
959 +- struct nfs4_clnt_odstate * st_clnt_odstate;
960 +- unsigned char st_access_bmap;
961 +- unsigned char st_deny_bmap;
962 +- struct nfs4_ol_stateid * st_openstp;
963 ++ struct nfs4_stid st_stid;
964 ++ struct list_head st_perfile;
965 ++ struct list_head st_perstateowner;
966 ++ struct list_head st_locks;
967 ++ struct nfs4_stateowner *st_stateowner;
968 ++ struct nfs4_clnt_odstate *st_clnt_odstate;
969 ++ unsigned char st_access_bmap;
970 ++ unsigned char st_deny_bmap;
971 ++ struct nfs4_ol_stateid *st_openstp;
972 ++ struct rw_semaphore st_rwsem;
973 + };
974 +
975 + static inline struct nfs4_ol_stateid *openlockstateid(struct nfs4_stid *s)
976 +diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
977 +index 176fe6afd94e..4d5e0a573f4f 100644
978 +--- a/fs/ocfs2/namei.c
979 ++++ b/fs/ocfs2/namei.c
980 +@@ -365,6 +365,8 @@ static int ocfs2_mknod(struct inode *dir,
981 + mlog_errno(status);
982 + goto leave;
983 + }
984 ++ /* update inode->i_mode after mask with "umask". */
985 ++ inode->i_mode = mode;
986 +
987 + handle = ocfs2_start_trans(osb, ocfs2_mknod_credits(osb->sb,
988 + S_ISDIR(mode),
989 +diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
990 +index 82806c60aa42..e4b464983322 100644
991 +--- a/include/linux/ipv6.h
992 ++++ b/include/linux/ipv6.h
993 +@@ -224,7 +224,7 @@ struct ipv6_pinfo {
994 + struct ipv6_ac_socklist *ipv6_ac_list;
995 + struct ipv6_fl_socklist __rcu *ipv6_fl_list;
996 +
997 +- struct ipv6_txoptions *opt;
998 ++ struct ipv6_txoptions __rcu *opt;
999 + struct sk_buff *pktoptions;
1000 + struct sk_buff *rxpmtu;
1001 + struct inet6_cork cork;
1002 +diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
1003 +index eb1cebed3f36..c90c9b70e568 100644
1004 +--- a/include/linux/jbd2.h
1005 ++++ b/include/linux/jbd2.h
1006 +@@ -1007,6 +1007,7 @@ struct journal_s
1007 + #define JBD2_ABORT_ON_SYNCDATA_ERR 0x040 /* Abort the journal on file
1008 + * data write error in ordered
1009 + * mode */
1010 ++#define JBD2_REC_ERR 0x080 /* The errno in the sb has been recorded */
1011 +
1012 + /*
1013 + * Function declarations for the journaling transaction and buffer
1014 +diff --git a/include/net/af_unix.h b/include/net/af_unix.h
1015 +index dfe4ddfbb43c..e830c3dff61a 100644
1016 +--- a/include/net/af_unix.h
1017 ++++ b/include/net/af_unix.h
1018 +@@ -63,6 +63,7 @@ struct unix_sock {
1019 + #define UNIX_GC_CANDIDATE 0
1020 + #define UNIX_GC_MAYBE_CYCLE 1
1021 + struct socket_wq peer_wq;
1022 ++ wait_queue_t peer_wake;
1023 + };
1024 +
1025 + static inline struct unix_sock *unix_sk(struct sock *sk)
1026 +diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
1027 +index b8529aa1dae7..b0f7445c0fdc 100644
1028 +--- a/include/net/ip6_tunnel.h
1029 ++++ b/include/net/ip6_tunnel.h
1030 +@@ -83,11 +83,12 @@ static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
1031 + err = ip6_local_out_sk(sk, skb);
1032 +
1033 + if (net_xmit_eval(err) == 0) {
1034 +- struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
1035 ++ struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats);
1036 + u64_stats_update_begin(&tstats->syncp);
1037 + tstats->tx_bytes += pkt_len;
1038 + tstats->tx_packets++;
1039 + u64_stats_update_end(&tstats->syncp);
1040 ++ put_cpu_ptr(tstats);
1041 + } else {
1042 + stats->tx_errors++;
1043 + stats->tx_aborted_errors++;
1044 +diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
1045 +index d8214cb88bbc..9c2897e56ee1 100644
1046 +--- a/include/net/ip_tunnels.h
1047 ++++ b/include/net/ip_tunnels.h
1048 +@@ -207,12 +207,13 @@ static inline void iptunnel_xmit_stats(int err,
1049 + struct pcpu_sw_netstats __percpu *stats)
1050 + {
1051 + if (err > 0) {
1052 +- struct pcpu_sw_netstats *tstats = this_cpu_ptr(stats);
1053 ++ struct pcpu_sw_netstats *tstats = get_cpu_ptr(stats);
1054 +
1055 + u64_stats_update_begin(&tstats->syncp);
1056 + tstats->tx_bytes += err;
1057 + tstats->tx_packets++;
1058 + u64_stats_update_end(&tstats->syncp);
1059 ++ put_cpu_ptr(tstats);
1060 + } else if (err < 0) {
1061 + err_stats->tx_errors++;
1062 + err_stats->tx_aborted_errors++;
1063 +diff --git a/include/net/ipv6.h b/include/net/ipv6.h
1064 +index eec8ad3c9843..df555ecd4002 100644
1065 +--- a/include/net/ipv6.h
1066 ++++ b/include/net/ipv6.h
1067 +@@ -205,6 +205,7 @@ extern rwlock_t ip6_ra_lock;
1068 + */
1069 +
1070 + struct ipv6_txoptions {
1071 ++ atomic_t refcnt;
1072 + /* Length of this structure */
1073 + int tot_len;
1074 +
1075 +@@ -217,7 +218,7 @@ struct ipv6_txoptions {
1076 + struct ipv6_opt_hdr *dst0opt;
1077 + struct ipv6_rt_hdr *srcrt; /* Routing Header */
1078 + struct ipv6_opt_hdr *dst1opt;
1079 +-
1080 ++ struct rcu_head rcu;
1081 + /* Option buffer, as read by IPV6_PKTOPTIONS, starts here. */
1082 + };
1083 +
1084 +@@ -250,6 +251,24 @@ struct ipv6_fl_socklist {
1085 + struct rcu_head rcu;
1086 + };
1087 +
1088 ++static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np)
1089 ++{
1090 ++ struct ipv6_txoptions *opt;
1091 ++
1092 ++ rcu_read_lock();
1093 ++ opt = rcu_dereference(np->opt);
1094 ++ if (opt && !atomic_inc_not_zero(&opt->refcnt))
1095 ++ opt = NULL;
1096 ++ rcu_read_unlock();
1097 ++ return opt;
1098 ++}
1099 ++
1100 ++static inline void txopt_put(struct ipv6_txoptions *opt)
1101 ++{
1102 ++ if (opt && atomic_dec_and_test(&opt->refcnt))
1103 ++ kfree_rcu(opt, rcu);
1104 ++}
1105 ++
1106 + struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label);
1107 + struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
1108 + struct ip6_flowlabel *fl,
1109 +@@ -488,6 +507,7 @@ struct ip6_create_arg {
1110 + u32 user;
1111 + const struct in6_addr *src;
1112 + const struct in6_addr *dst;
1113 ++ int iif;
1114 + u8 ecn;
1115 + };
1116 +
1117 +diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
1118 +index 6d778efcfdfd..080b657ef8fb 100644
1119 +--- a/include/net/sch_generic.h
1120 ++++ b/include/net/sch_generic.h
1121 +@@ -61,6 +61,9 @@ struct Qdisc {
1122 + */
1123 + #define TCQ_F_WARN_NONWC (1 << 16)
1124 + #define TCQ_F_CPUSTATS 0x20 /* run using percpu statistics */
1125 ++#define TCQ_F_NOPARENT 0x40 /* root of its hierarchy :
1126 ++ * qdisc_tree_decrease_qlen() should stop.
1127 ++ */
1128 + u32 limit;
1129 + const struct Qdisc_ops *ops;
1130 + struct qdisc_size_table __rcu *stab;
1131 +diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
1132 +index 8a6616583f38..1c1b8ab34037 100644
1133 +--- a/kernel/bpf/arraymap.c
1134 ++++ b/kernel/bpf/arraymap.c
1135 +@@ -109,7 +109,7 @@ static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
1136 + /* all elements already exist */
1137 + return -EEXIST;
1138 +
1139 +- memcpy(array->value + array->elem_size * index, value, array->elem_size);
1140 ++ memcpy(array->value + array->elem_size * index, value, map->value_size);
1141 + return 0;
1142 + }
1143 +
1144 +diff --git a/net/core/neighbour.c b/net/core/neighbour.c
1145 +index 2237c1b3cdd2..d6e8cfcb6f7c 100644
1146 +--- a/net/core/neighbour.c
1147 ++++ b/net/core/neighbour.c
1148 +@@ -2207,7 +2207,7 @@ static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
1149 + ndm->ndm_pad2 = 0;
1150 + ndm->ndm_flags = pn->flags | NTF_PROXY;
1151 + ndm->ndm_type = RTN_UNICAST;
1152 +- ndm->ndm_ifindex = pn->dev->ifindex;
1153 ++ ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
1154 + ndm->ndm_state = NUD_NONE;
1155 +
1156 + if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
1157 +@@ -2282,7 +2282,7 @@ static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
1158 + if (h > s_h)
1159 + s_idx = 0;
1160 + for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
1161 +- if (dev_net(n->dev) != net)
1162 ++ if (pneigh_net(n) != net)
1163 + continue;
1164 + if (idx < s_idx)
1165 + goto next;
1166 +diff --git a/net/core/scm.c b/net/core/scm.c
1167 +index 3b6899b7d810..8a1741b14302 100644
1168 +--- a/net/core/scm.c
1169 ++++ b/net/core/scm.c
1170 +@@ -305,6 +305,8 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
1171 + err = put_user(cmlen, &cm->cmsg_len);
1172 + if (!err) {
1173 + cmlen = CMSG_SPACE(i*sizeof(int));
1174 ++ if (msg->msg_controllen < cmlen)
1175 ++ cmlen = msg->msg_controllen;
1176 + msg->msg_control += cmlen;
1177 + msg->msg_controllen -= cmlen;
1178 + }
1179 +diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
1180 +index 5165571f397a..a0490508d213 100644
1181 +--- a/net/dccp/ipv6.c
1182 ++++ b/net/dccp/ipv6.c
1183 +@@ -202,7 +202,9 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req)
1184 + security_req_classify_flow(req, flowi6_to_flowi(&fl6));
1185 +
1186 +
1187 +- final_p = fl6_update_dst(&fl6, np->opt, &final);
1188 ++ rcu_read_lock();
1189 ++ final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final);
1190 ++ rcu_read_unlock();
1191 +
1192 + dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
1193 + if (IS_ERR(dst)) {
1194 +@@ -219,7 +221,10 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req)
1195 + &ireq->ir_v6_loc_addr,
1196 + &ireq->ir_v6_rmt_addr);
1197 + fl6.daddr = ireq->ir_v6_rmt_addr;
1198 +- err = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
1199 ++ rcu_read_lock();
1200 ++ err = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt),
1201 ++ np->tclass);
1202 ++ rcu_read_unlock();
1203 + err = net_xmit_eval(err);
1204 + }
1205 +
1206 +@@ -415,6 +420,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
1207 + {
1208 + struct inet_request_sock *ireq = inet_rsk(req);
1209 + struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
1210 ++ struct ipv6_txoptions *opt;
1211 + struct inet_sock *newinet;
1212 + struct dccp6_sock *newdp6;
1213 + struct sock *newsk;
1214 +@@ -534,13 +540,15 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
1215 + * Yes, keeping reference count would be much more clever, but we make
1216 + * one more one thing there: reattach optmem to newsk.
1217 + */
1218 +- if (np->opt != NULL)
1219 +- newnp->opt = ipv6_dup_options(newsk, np->opt);
1220 +-
1221 ++ opt = rcu_dereference(np->opt);
1222 ++ if (opt) {
1223 ++ opt = ipv6_dup_options(newsk, opt);
1224 ++ RCU_INIT_POINTER(newnp->opt, opt);
1225 ++ }
1226 + inet_csk(newsk)->icsk_ext_hdr_len = 0;
1227 +- if (newnp->opt != NULL)
1228 +- inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1229 +- newnp->opt->opt_flen);
1230 ++ if (opt)
1231 ++ inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
1232 ++ opt->opt_flen;
1233 +
1234 + dccp_sync_mss(newsk, dst_mtu(dst));
1235 +
1236 +@@ -793,6 +801,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
1237 + struct ipv6_pinfo *np = inet6_sk(sk);
1238 + struct dccp_sock *dp = dccp_sk(sk);
1239 + struct in6_addr *saddr = NULL, *final_p, final;
1240 ++ struct ipv6_txoptions *opt;
1241 + struct flowi6 fl6;
1242 + struct dst_entry *dst;
1243 + int addr_type;
1244 +@@ -892,7 +901,8 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
1245 + fl6.fl6_sport = inet->inet_sport;
1246 + security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
1247 +
1248 +- final_p = fl6_update_dst(&fl6, np->opt, &final);
1249 ++ opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
1250 ++ final_p = fl6_update_dst(&fl6, opt, &final);
1251 +
1252 + dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
1253 + if (IS_ERR(dst)) {
1254 +@@ -912,9 +922,8 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
1255 + __ip6_dst_store(sk, dst, NULL, NULL);
1256 +
1257 + icsk->icsk_ext_hdr_len = 0;
1258 +- if (np->opt != NULL)
1259 +- icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
1260 +- np->opt->opt_nflen);
1261 ++ if (opt)
1262 ++ icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen;
1263 +
1264 + inet->inet_dport = usin->sin6_port;
1265 +
1266 +diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
1267 +index df28693f32e1..c3bfebd501ed 100644
1268 +--- a/net/ipv4/ipmr.c
1269 ++++ b/net/ipv4/ipmr.c
1270 +@@ -134,7 +134,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
1271 + struct mfc_cache *c, struct rtmsg *rtm);
1272 + static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
1273 + int cmd);
1274 +-static void mroute_clean_tables(struct mr_table *mrt);
1275 ++static void mroute_clean_tables(struct mr_table *mrt, bool all);
1276 + static void ipmr_expire_process(unsigned long arg);
1277 +
1278 + #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
1279 +@@ -351,7 +351,7 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
1280 + static void ipmr_free_table(struct mr_table *mrt)
1281 + {
1282 + del_timer_sync(&mrt->ipmr_expire_timer);
1283 +- mroute_clean_tables(mrt);
1284 ++ mroute_clean_tables(mrt, true);
1285 + kfree(mrt);
1286 + }
1287 +
1288 +@@ -1209,7 +1209,7 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
1289 + * Close the multicast socket, and clear the vif tables etc
1290 + */
1291 +
1292 +-static void mroute_clean_tables(struct mr_table *mrt)
1293 ++static void mroute_clean_tables(struct mr_table *mrt, bool all)
1294 + {
1295 + int i;
1296 + LIST_HEAD(list);
1297 +@@ -1218,8 +1218,9 @@ static void mroute_clean_tables(struct mr_table *mrt)
1298 + /* Shut down all active vif entries */
1299 +
1300 + for (i = 0; i < mrt->maxvif; i++) {
1301 +- if (!(mrt->vif_table[i].flags & VIFF_STATIC))
1302 +- vif_delete(mrt, i, 0, &list);
1303 ++ if (!all && (mrt->vif_table[i].flags & VIFF_STATIC))
1304 ++ continue;
1305 ++ vif_delete(mrt, i, 0, &list);
1306 + }
1307 + unregister_netdevice_many(&list);
1308 +
1309 +@@ -1227,7 +1228,7 @@ static void mroute_clean_tables(struct mr_table *mrt)
1310 +
1311 + for (i = 0; i < MFC_LINES; i++) {
1312 + list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
1313 +- if (c->mfc_flags & MFC_STATIC)
1314 ++ if (!all && (c->mfc_flags & MFC_STATIC))
1315 + continue;
1316 + list_del_rcu(&c->list);
1317 + mroute_netlink_event(mrt, c, RTM_DELROUTE);
1318 +@@ -1262,7 +1263,7 @@ static void mrtsock_destruct(struct sock *sk)
1319 + NETCONFA_IFINDEX_ALL,
1320 + net->ipv4.devconf_all);
1321 + RCU_INIT_POINTER(mrt->mroute_sk, NULL);
1322 +- mroute_clean_tables(mrt);
1323 ++ mroute_clean_tables(mrt, false);
1324 + }
1325 + }
1326 + rtnl_unlock();
1327 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
1328 +index c9ab964189a0..87463c814896 100644
1329 +--- a/net/ipv4/tcp_input.c
1330 ++++ b/net/ipv4/tcp_input.c
1331 +@@ -4438,19 +4438,34 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int
1332 + int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
1333 + {
1334 + struct sk_buff *skb;
1335 ++ int err = -ENOMEM;
1336 ++ int data_len = 0;
1337 + bool fragstolen;
1338 +
1339 + if (size == 0)
1340 + return 0;
1341 +
1342 +- skb = alloc_skb(size, sk->sk_allocation);
1343 ++ if (size > PAGE_SIZE) {
1344 ++ int npages = min_t(size_t, size >> PAGE_SHIFT, MAX_SKB_FRAGS);
1345 ++
1346 ++ data_len = npages << PAGE_SHIFT;
1347 ++ size = data_len + (size & ~PAGE_MASK);
1348 ++ }
1349 ++ skb = alloc_skb_with_frags(size - data_len, data_len,
1350 ++ PAGE_ALLOC_COSTLY_ORDER,
1351 ++ &err, sk->sk_allocation);
1352 + if (!skb)
1353 + goto err;
1354 +
1355 ++ skb_put(skb, size - data_len);
1356 ++ skb->data_len = data_len;
1357 ++ skb->len = size;
1358 ++
1359 + if (tcp_try_rmem_schedule(sk, skb, skb->truesize))
1360 + goto err_free;
1361 +
1362 +- if (memcpy_from_msg(skb_put(skb, size), msg, size))
1363 ++ err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
1364 ++ if (err)
1365 + goto err_free;
1366 +
1367 + TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt;
1368 +@@ -4466,7 +4481,8 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
1369 + err_free:
1370 + kfree_skb(skb);
1371 + err:
1372 +- return -ENOMEM;
1373 ++ return err;
1374 ++
1375 + }
1376 +
1377 + static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
1378 +@@ -5622,6 +5638,7 @@ discard:
1379 + }
1380 +
1381 + tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
1382 ++ tp->copied_seq = tp->rcv_nxt;
1383 + tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;
1384 +
1385 + /* RFC1323: The window in SYN & SYN/ACK segments is
1386 +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
1387 +index 441ca6f38981..88203e755af8 100644
1388 +--- a/net/ipv4/tcp_ipv4.c
1389 ++++ b/net/ipv4/tcp_ipv4.c
1390 +@@ -922,7 +922,8 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1391 + }
1392 +
1393 + md5sig = rcu_dereference_protected(tp->md5sig_info,
1394 +- sock_owned_by_user(sk));
1395 ++ sock_owned_by_user(sk) ||
1396 ++ lockdep_is_held(&sk->sk_lock.slock));
1397 + if (!md5sig) {
1398 + md5sig = kmalloc(sizeof(*md5sig), gfp);
1399 + if (!md5sig)
1400 +diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
1401 +index 8c65dc147d8b..c8f97858d6f6 100644
1402 +--- a/net/ipv4/tcp_timer.c
1403 ++++ b/net/ipv4/tcp_timer.c
1404 +@@ -176,6 +176,18 @@ static int tcp_write_timeout(struct sock *sk)
1405 + syn_set = true;
1406 + } else {
1407 + if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) {
1408 ++ /* Some middle-boxes may black-hole Fast Open _after_
1409 ++ * the handshake. Therefore we conservatively disable
1410 ++ * Fast Open on this path on recurring timeouts with
1411 ++ * few or zero bytes acked after Fast Open.
1412 ++ */
1413 ++ if (tp->syn_data_acked &&
1414 ++ tp->bytes_acked <= tp->rx_opt.mss_clamp) {
1415 ++ tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
1416 ++ if (icsk->icsk_retransmits == sysctl_tcp_retries1)
1417 ++ NET_INC_STATS_BH(sock_net(sk),
1418 ++ LINUX_MIB_TCPFASTOPENACTIVEFAIL);
1419 ++ }
1420 + /* Black hole detection */
1421 + tcp_mtu_probing(icsk, sk);
1422 +
1423 +diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
1424 +index eef63b394c5a..2d044d2a2ccf 100644
1425 +--- a/net/ipv6/af_inet6.c
1426 ++++ b/net/ipv6/af_inet6.c
1427 +@@ -425,9 +425,11 @@ void inet6_destroy_sock(struct sock *sk)
1428 +
1429 + /* Free tx options */
1430 +
1431 +- opt = xchg(&np->opt, NULL);
1432 +- if (opt)
1433 +- sock_kfree_s(sk, opt, opt->tot_len);
1434 ++ opt = xchg((__force struct ipv6_txoptions **)&np->opt, NULL);
1435 ++ if (opt) {
1436 ++ atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
1437 ++ txopt_put(opt);
1438 ++ }
1439 + }
1440 + EXPORT_SYMBOL_GPL(inet6_destroy_sock);
1441 +
1442 +@@ -656,7 +658,10 @@ int inet6_sk_rebuild_header(struct sock *sk)
1443 + fl6.fl6_sport = inet->inet_sport;
1444 + security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
1445 +
1446 +- final_p = fl6_update_dst(&fl6, np->opt, &final);
1447 ++ rcu_read_lock();
1448 ++ final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt),
1449 ++ &final);
1450 ++ rcu_read_unlock();
1451 +
1452 + dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
1453 + if (IS_ERR(dst)) {
1454 +diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
1455 +index b10a88986a98..13ca4cf5616f 100644
1456 +--- a/net/ipv6/datagram.c
1457 ++++ b/net/ipv6/datagram.c
1458 +@@ -167,8 +167,10 @@ ipv4_connected:
1459 +
1460 + security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
1461 +
1462 +- opt = flowlabel ? flowlabel->opt : np->opt;
1463 ++ rcu_read_lock();
1464 ++ opt = flowlabel ? flowlabel->opt : rcu_dereference(np->opt);
1465 + final_p = fl6_update_dst(&fl6, opt, &final);
1466 ++ rcu_read_unlock();
1467 +
1468 + dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
1469 + err = 0;
1470 +diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
1471 +index a7bbbe45570b..adbd6958c398 100644
1472 +--- a/net/ipv6/exthdrs.c
1473 ++++ b/net/ipv6/exthdrs.c
1474 +@@ -727,6 +727,7 @@ ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt)
1475 + *((char **)&opt2->dst1opt) += dif;
1476 + if (opt2->srcrt)
1477 + *((char **)&opt2->srcrt) += dif;
1478 ++ atomic_set(&opt2->refcnt, 1);
1479 + }
1480 + return opt2;
1481 + }
1482 +@@ -790,7 +791,7 @@ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
1483 + return ERR_PTR(-ENOBUFS);
1484 +
1485 + memset(opt2, 0, tot_len);
1486 +-
1487 ++ atomic_set(&opt2->refcnt, 1);
1488 + opt2->tot_len = tot_len;
1489 + p = (char *)(opt2 + 1);
1490 +
1491 +diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
1492 +index 6927f3fb5597..9beed302eb36 100644
1493 +--- a/net/ipv6/inet6_connection_sock.c
1494 ++++ b/net/ipv6/inet6_connection_sock.c
1495 +@@ -77,7 +77,9 @@ struct dst_entry *inet6_csk_route_req(struct sock *sk,
1496 + memset(fl6, 0, sizeof(*fl6));
1497 + fl6->flowi6_proto = IPPROTO_TCP;
1498 + fl6->daddr = ireq->ir_v6_rmt_addr;
1499 +- final_p = fl6_update_dst(fl6, np->opt, &final);
1500 ++ rcu_read_lock();
1501 ++ final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
1502 ++ rcu_read_unlock();
1503 + fl6->saddr = ireq->ir_v6_loc_addr;
1504 + fl6->flowi6_oif = ireq->ir_iif;
1505 + fl6->flowi6_mark = ireq->ir_mark;
1506 +@@ -207,7 +209,9 @@ static struct dst_entry *inet6_csk_route_socket(struct sock *sk,
1507 + fl6->fl6_dport = inet->inet_dport;
1508 + security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
1509 +
1510 +- final_p = fl6_update_dst(fl6, np->opt, &final);
1511 ++ rcu_read_lock();
1512 ++ final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
1513 ++ rcu_read_unlock();
1514 +
1515 + dst = __inet6_csk_dst_check(sk, np->dst_cookie);
1516 + if (!dst) {
1517 +@@ -240,7 +244,8 @@ int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl_unused
1518 + /* Restore final destination back after routing done */
1519 + fl6.daddr = sk->sk_v6_daddr;
1520 +
1521 +- res = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
1522 ++ res = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt),
1523 ++ np->tclass);
1524 + rcu_read_unlock();
1525 + return res;
1526 + }
1527 +diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
1528 +index 5f36266b1f5e..a7aef4b52d65 100644
1529 +--- a/net/ipv6/ip6mr.c
1530 ++++ b/net/ipv6/ip6mr.c
1531 +@@ -118,7 +118,7 @@ static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
1532 + int cmd);
1533 + static int ip6mr_rtm_dumproute(struct sk_buff *skb,
1534 + struct netlink_callback *cb);
1535 +-static void mroute_clean_tables(struct mr6_table *mrt);
1536 ++static void mroute_clean_tables(struct mr6_table *mrt, bool all);
1537 + static void ipmr_expire_process(unsigned long arg);
1538 +
1539 + #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
1540 +@@ -335,7 +335,7 @@ static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
1541 + static void ip6mr_free_table(struct mr6_table *mrt)
1542 + {
1543 + del_timer_sync(&mrt->ipmr_expire_timer);
1544 +- mroute_clean_tables(mrt);
1545 ++ mroute_clean_tables(mrt, true);
1546 + kfree(mrt);
1547 + }
1548 +
1549 +@@ -1543,7 +1543,7 @@ static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
1550 + * Close the multicast socket, and clear the vif tables etc
1551 + */
1552 +
1553 +-static void mroute_clean_tables(struct mr6_table *mrt)
1554 ++static void mroute_clean_tables(struct mr6_table *mrt, bool all)
1555 + {
1556 + int i;
1557 + LIST_HEAD(list);
1558 +@@ -1553,8 +1553,9 @@ static void mroute_clean_tables(struct mr6_table *mrt)
1559 + * Shut down all active vif entries
1560 + */
1561 + for (i = 0; i < mrt->maxvif; i++) {
1562 +- if (!(mrt->vif6_table[i].flags & VIFF_STATIC))
1563 +- mif6_delete(mrt, i, &list);
1564 ++ if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC))
1565 ++ continue;
1566 ++ mif6_delete(mrt, i, &list);
1567 + }
1568 + unregister_netdevice_many(&list);
1569 +
1570 +@@ -1563,7 +1564,7 @@ static void mroute_clean_tables(struct mr6_table *mrt)
1571 + */
1572 + for (i = 0; i < MFC6_LINES; i++) {
1573 + list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) {
1574 +- if (c->mfc_flags & MFC_STATIC)
1575 ++ if (!all && (c->mfc_flags & MFC_STATIC))
1576 + continue;
1577 + write_lock_bh(&mrt_lock);
1578 + list_del(&c->list);
1579 +@@ -1626,7 +1627,7 @@ int ip6mr_sk_done(struct sock *sk)
1580 + net->ipv6.devconf_all);
1581 + write_unlock_bh(&mrt_lock);
1582 +
1583 +- mroute_clean_tables(mrt);
1584 ++ mroute_clean_tables(mrt, false);
1585 + err = 0;
1586 + break;
1587 + }
1588 +diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
1589 +index 63e6956917c9..4449ad1f8114 100644
1590 +--- a/net/ipv6/ipv6_sockglue.c
1591 ++++ b/net/ipv6/ipv6_sockglue.c
1592 +@@ -111,7 +111,8 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
1593 + icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
1594 + }
1595 + }
1596 +- opt = xchg(&inet6_sk(sk)->opt, opt);
1597 ++ opt = xchg((__force struct ipv6_txoptions **)&inet6_sk(sk)->opt,
1598 ++ opt);
1599 + sk_dst_reset(sk);
1600 +
1601 + return opt;
1602 +@@ -231,9 +232,12 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
1603 + sk->sk_socket->ops = &inet_dgram_ops;
1604 + sk->sk_family = PF_INET;
1605 + }
1606 +- opt = xchg(&np->opt, NULL);
1607 +- if (opt)
1608 +- sock_kfree_s(sk, opt, opt->tot_len);
1609 ++ opt = xchg((__force struct ipv6_txoptions **)&np->opt,
1610 ++ NULL);
1611 ++ if (opt) {
1612 ++ atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
1613 ++ txopt_put(opt);
1614 ++ }
1615 + pktopt = xchg(&np->pktoptions, NULL);
1616 + kfree_skb(pktopt);
1617 +
1618 +@@ -403,7 +407,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
1619 + if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW))
1620 + break;
1621 +
1622 +- opt = ipv6_renew_options(sk, np->opt, optname,
1623 ++ opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
1624 ++ opt = ipv6_renew_options(sk, opt, optname,
1625 + (struct ipv6_opt_hdr __user *)optval,
1626 + optlen);
1627 + if (IS_ERR(opt)) {
1628 +@@ -432,8 +437,10 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
1629 + retv = 0;
1630 + opt = ipv6_update_options(sk, opt);
1631 + sticky_done:
1632 +- if (opt)
1633 +- sock_kfree_s(sk, opt, opt->tot_len);
1634 ++ if (opt) {
1635 ++ atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
1636 ++ txopt_put(opt);
1637 ++ }
1638 + break;
1639 + }
1640 +
1641 +@@ -486,6 +493,7 @@ sticky_done:
1642 + break;
1643 +
1644 + memset(opt, 0, sizeof(*opt));
1645 ++ atomic_set(&opt->refcnt, 1);
1646 + opt->tot_len = sizeof(*opt) + optlen;
1647 + retv = -EFAULT;
1648 + if (copy_from_user(opt+1, optval, optlen))
1649 +@@ -502,8 +510,10 @@ update:
1650 + retv = 0;
1651 + opt = ipv6_update_options(sk, opt);
1652 + done:
1653 +- if (opt)
1654 +- sock_kfree_s(sk, opt, opt->tot_len);
1655 ++ if (opt) {
1656 ++ atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
1657 ++ txopt_put(opt);
1658 ++ }
1659 + break;
1660 + }
1661 + case IPV6_UNICAST_HOPS:
1662 +@@ -1110,10 +1120,11 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
1663 + case IPV6_RTHDR:
1664 + case IPV6_DSTOPTS:
1665 + {
1666 ++ struct ipv6_txoptions *opt;
1667 +
1668 + lock_sock(sk);
1669 +- len = ipv6_getsockopt_sticky(sk, np->opt,
1670 +- optname, optval, len);
1671 ++ opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
1672 ++ len = ipv6_getsockopt_sticky(sk, opt, optname, optval, len);
1673 + release_sock(sk);
1674 + /* check if ipv6_getsockopt_sticky() returns err code */
1675 + if (len < 0)
1676 +diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
1677 +index 083b2927fc67..41e3b5ee8d0b 100644
1678 +--- a/net/ipv6/mcast.c
1679 ++++ b/net/ipv6/mcast.c
1680 +@@ -1651,7 +1651,6 @@ out:
1681 + if (!err) {
1682 + ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
1683 + ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
1684 +- IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, payload_len);
1685 + } else {
1686 + IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
1687 + }
1688 +@@ -2014,7 +2013,6 @@ out:
1689 + if (!err) {
1690 + ICMP6MSGOUT_INC_STATS(net, idev, type);
1691 + ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
1692 +- IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, full_len);
1693 + } else
1694 + IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
1695 +
1696 +diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
1697 +index 6f187c8d8a1b..d235ed7f47ab 100644
1698 +--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
1699 ++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
1700 +@@ -190,7 +190,7 @@ static void nf_ct_frag6_expire(unsigned long data)
1701 + /* Creation primitives. */
1702 + static inline struct frag_queue *fq_find(struct net *net, __be32 id,
1703 + u32 user, struct in6_addr *src,
1704 +- struct in6_addr *dst, u8 ecn)
1705 ++ struct in6_addr *dst, int iif, u8 ecn)
1706 + {
1707 + struct inet_frag_queue *q;
1708 + struct ip6_create_arg arg;
1709 +@@ -200,6 +200,7 @@ static inline struct frag_queue *fq_find(struct net *net, __be32 id,
1710 + arg.user = user;
1711 + arg.src = src;
1712 + arg.dst = dst;
1713 ++ arg.iif = iif;
1714 + arg.ecn = ecn;
1715 +
1716 + local_bh_disable();
1717 +@@ -603,7 +604,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
1718 + fhdr = (struct frag_hdr *)skb_transport_header(clone);
1719 +
1720 + fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr,
1721 +- ip6_frag_ecn(hdr));
1722 ++ skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
1723 + if (fq == NULL) {
1724 + pr_debug("Can't find and can't create new queue\n");
1725 + goto ret_orig;
1726 +diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
1727 +index 8072bd4139b7..2c639aee12cb 100644
1728 +--- a/net/ipv6/raw.c
1729 ++++ b/net/ipv6/raw.c
1730 +@@ -731,6 +731,7 @@ static int raw6_getfrag(void *from, char *to, int offset, int len, int odd,
1731 +
1732 + static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1733 + {
1734 ++ struct ipv6_txoptions *opt_to_free = NULL;
1735 + struct ipv6_txoptions opt_space;
1736 + DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
1737 + struct in6_addr *daddr, *final_p, final;
1738 +@@ -837,8 +838,10 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1739 + if (!(opt->opt_nflen|opt->opt_flen))
1740 + opt = NULL;
1741 + }
1742 +- if (!opt)
1743 +- opt = np->opt;
1744 ++ if (!opt) {
1745 ++ opt = txopt_get(np);
1746 ++ opt_to_free = opt;
1747 ++ }
1748 + if (flowlabel)
1749 + opt = fl6_merge_options(&opt_space, flowlabel, opt);
1750 + opt = ipv6_fixup_options(&opt_space, opt);
1751 +@@ -901,6 +904,7 @@ done:
1752 + dst_release(dst);
1753 + out:
1754 + fl6_sock_release(flowlabel);
1755 ++ txopt_put(opt_to_free);
1756 + return err < 0 ? err : len;
1757 + do_confirm:
1758 + dst_confirm(dst);
1759 +diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
1760 +index 8ffa2c8cce77..9d1f6a28b284 100644
1761 +--- a/net/ipv6/reassembly.c
1762 ++++ b/net/ipv6/reassembly.c
1763 +@@ -108,7 +108,10 @@ bool ip6_frag_match(const struct inet_frag_queue *q, const void *a)
1764 + return fq->id == arg->id &&
1765 + fq->user == arg->user &&
1766 + ipv6_addr_equal(&fq->saddr, arg->src) &&
1767 +- ipv6_addr_equal(&fq->daddr, arg->dst);
1768 ++ ipv6_addr_equal(&fq->daddr, arg->dst) &&
1769 ++ (arg->iif == fq->iif ||
1770 ++ !(ipv6_addr_type(arg->dst) & (IPV6_ADDR_MULTICAST |
1771 ++ IPV6_ADDR_LINKLOCAL)));
1772 + }
1773 + EXPORT_SYMBOL(ip6_frag_match);
1774 +
1775 +@@ -180,7 +183,7 @@ static void ip6_frag_expire(unsigned long data)
1776 +
1777 + static struct frag_queue *
1778 + fq_find(struct net *net, __be32 id, const struct in6_addr *src,
1779 +- const struct in6_addr *dst, u8 ecn)
1780 ++ const struct in6_addr *dst, int iif, u8 ecn)
1781 + {
1782 + struct inet_frag_queue *q;
1783 + struct ip6_create_arg arg;
1784 +@@ -190,6 +193,7 @@ fq_find(struct net *net, __be32 id, const struct in6_addr *src,
1785 + arg.user = IP6_DEFRAG_LOCAL_DELIVER;
1786 + arg.src = src;
1787 + arg.dst = dst;
1788 ++ arg.iif = iif;
1789 + arg.ecn = ecn;
1790 +
1791 + hash = inet6_hash_frag(id, src, dst);
1792 +@@ -551,7 +555,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
1793 + }
1794 +
1795 + fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
1796 +- ip6_frag_ecn(hdr));
1797 ++ skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
1798 + if (fq) {
1799 + int ret;
1800 +
1801 +diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
1802 +index 21bc2eb53c57..a4cf004f44d0 100644
1803 +--- a/net/ipv6/syncookies.c
1804 ++++ b/net/ipv6/syncookies.c
1805 +@@ -242,7 +242,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
1806 + memset(&fl6, 0, sizeof(fl6));
1807 + fl6.flowi6_proto = IPPROTO_TCP;
1808 + fl6.daddr = ireq->ir_v6_rmt_addr;
1809 +- final_p = fl6_update_dst(&fl6, np->opt, &final);
1810 ++ final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final);
1811 + fl6.saddr = ireq->ir_v6_loc_addr;
1812 + fl6.flowi6_oif = sk->sk_bound_dev_if;
1813 + fl6.flowi6_mark = ireq->ir_mark;
1814 +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
1815 +index e541d68dba8b..cfb27f56c62f 100644
1816 +--- a/net/ipv6/tcp_ipv6.c
1817 ++++ b/net/ipv6/tcp_ipv6.c
1818 +@@ -121,6 +121,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
1819 + struct ipv6_pinfo *np = inet6_sk(sk);
1820 + struct tcp_sock *tp = tcp_sk(sk);
1821 + struct in6_addr *saddr = NULL, *final_p, final;
1822 ++ struct ipv6_txoptions *opt;
1823 + struct rt6_info *rt;
1824 + struct flowi6 fl6;
1825 + struct dst_entry *dst;
1826 +@@ -237,7 +238,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
1827 + fl6.fl6_dport = usin->sin6_port;
1828 + fl6.fl6_sport = inet->inet_sport;
1829 +
1830 +- final_p = fl6_update_dst(&fl6, np->opt, &final);
1831 ++ opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
1832 ++ final_p = fl6_update_dst(&fl6, opt, &final);
1833 +
1834 + security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
1835 +
1836 +@@ -266,9 +268,9 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
1837 + tcp_fetch_timewait_stamp(sk, dst);
1838 +
1839 + icsk->icsk_ext_hdr_len = 0;
1840 +- if (np->opt)
1841 +- icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
1842 +- np->opt->opt_nflen);
1843 ++ if (opt)
1844 ++ icsk->icsk_ext_hdr_len = opt->opt_flen +
1845 ++ opt->opt_nflen;
1846 +
1847 + tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
1848 +
1849 +@@ -464,7 +466,8 @@ static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
1850 + fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
1851 +
1852 + skb_set_queue_mapping(skb, queue_mapping);
1853 +- err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
1854 ++ err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt),
1855 ++ np->tclass);
1856 + err = net_xmit_eval(err);
1857 + }
1858 +
1859 +@@ -994,6 +997,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1860 + struct inet_request_sock *ireq;
1861 + struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
1862 + struct tcp6_sock *newtcp6sk;
1863 ++ struct ipv6_txoptions *opt;
1864 + struct inet_sock *newinet;
1865 + struct tcp_sock *newtp;
1866 + struct sock *newsk;
1867 +@@ -1129,13 +1133,15 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1868 + but we make one more one thing there: reattach optmem
1869 + to newsk.
1870 + */
1871 +- if (np->opt)
1872 +- newnp->opt = ipv6_dup_options(newsk, np->opt);
1873 +-
1874 ++ opt = rcu_dereference(np->opt);
1875 ++ if (opt) {
1876 ++ opt = ipv6_dup_options(newsk, opt);
1877 ++ RCU_INIT_POINTER(newnp->opt, opt);
1878 ++ }
1879 + inet_csk(newsk)->icsk_ext_hdr_len = 0;
1880 +- if (newnp->opt)
1881 +- inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1882 +- newnp->opt->opt_flen);
1883 ++ if (opt)
1884 ++ inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
1885 ++ opt->opt_flen;
1886 +
1887 + tcp_ca_openreq_child(newsk, dst);
1888 +
1889 +diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
1890 +index e51fc3eee6db..7333f3575fc5 100644
1891 +--- a/net/ipv6/udp.c
1892 ++++ b/net/ipv6/udp.c
1893 +@@ -1107,6 +1107,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1894 + DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
1895 + struct in6_addr *daddr, *final_p, final;
1896 + struct ipv6_txoptions *opt = NULL;
1897 ++ struct ipv6_txoptions *opt_to_free = NULL;
1898 + struct ip6_flowlabel *flowlabel = NULL;
1899 + struct flowi6 fl6;
1900 + struct dst_entry *dst;
1901 +@@ -1260,8 +1261,10 @@ do_udp_sendmsg:
1902 + opt = NULL;
1903 + connected = 0;
1904 + }
1905 +- if (!opt)
1906 +- opt = np->opt;
1907 ++ if (!opt) {
1908 ++ opt = txopt_get(np);
1909 ++ opt_to_free = opt;
1910 ++ }
1911 + if (flowlabel)
1912 + opt = fl6_merge_options(&opt_space, flowlabel, opt);
1913 + opt = ipv6_fixup_options(&opt_space, opt);
1914 +@@ -1370,6 +1373,7 @@ release_dst:
1915 + out:
1916 + dst_release(dst);
1917 + fl6_sock_release(flowlabel);
1918 ++ txopt_put(opt_to_free);
1919 + if (!err)
1920 + return len;
1921 + /*
1922 +diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
1923 +index d1ded3777815..0ce9da948ad7 100644
1924 +--- a/net/l2tp/l2tp_ip6.c
1925 ++++ b/net/l2tp/l2tp_ip6.c
1926 +@@ -486,6 +486,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1927 + DECLARE_SOCKADDR(struct sockaddr_l2tpip6 *, lsa, msg->msg_name);
1928 + struct in6_addr *daddr, *final_p, final;
1929 + struct ipv6_pinfo *np = inet6_sk(sk);
1930 ++ struct ipv6_txoptions *opt_to_free = NULL;
1931 + struct ipv6_txoptions *opt = NULL;
1932 + struct ip6_flowlabel *flowlabel = NULL;
1933 + struct dst_entry *dst = NULL;
1934 +@@ -575,8 +576,10 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1935 + opt = NULL;
1936 + }
1937 +
1938 +- if (opt == NULL)
1939 +- opt = np->opt;
1940 ++ if (!opt) {
1941 ++ opt = txopt_get(np);
1942 ++ opt_to_free = opt;
1943 ++ }
1944 + if (flowlabel)
1945 + opt = fl6_merge_options(&opt_space, flowlabel, opt);
1946 + opt = ipv6_fixup_options(&opt_space, opt);
1947 +@@ -631,6 +634,7 @@ done:
1948 + dst_release(dst);
1949 + out:
1950 + fl6_sock_release(flowlabel);
1951 ++ txopt_put(opt_to_free);
1952 +
1953 + return err < 0 ? err : len;
1954 +
1955 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
1956 +index 686e60187401..ebc39e66d704 100644
1957 +--- a/net/packet/af_packet.c
1958 ++++ b/net/packet/af_packet.c
1959 +@@ -1524,6 +1524,20 @@ static void fanout_release(struct sock *sk)
1960 + mutex_unlock(&fanout_mutex);
1961 + }
1962 +
1963 ++static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
1964 ++ struct sk_buff *skb)
1965 ++{
1966 ++ /* Earlier code assumed this would be a VLAN pkt, double-check
1967 ++ * this now that we have the actual packet in hand. We can only
1968 ++ * do this check on Ethernet devices.
1969 ++ */
1970 ++ if (unlikely(dev->type != ARPHRD_ETHER))
1971 ++ return false;
1972 ++
1973 ++ skb_reset_mac_header(skb);
1974 ++ return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
1975 ++}
1976 ++
1977 + static const struct proto_ops packet_ops;
1978 +
1979 + static const struct proto_ops packet_ops_spkt;
1980 +@@ -1685,18 +1699,10 @@ retry:
1981 + goto retry;
1982 + }
1983 +
1984 +- if (len > (dev->mtu + dev->hard_header_len + extra_len)) {
1985 +- /* Earlier code assumed this would be a VLAN pkt,
1986 +- * double-check this now that we have the actual
1987 +- * packet in hand.
1988 +- */
1989 +- struct ethhdr *ehdr;
1990 +- skb_reset_mac_header(skb);
1991 +- ehdr = eth_hdr(skb);
1992 +- if (ehdr->h_proto != htons(ETH_P_8021Q)) {
1993 +- err = -EMSGSIZE;
1994 +- goto out_unlock;
1995 +- }
1996 ++ if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
1997 ++ !packet_extra_vlan_len_allowed(dev, skb)) {
1998 ++ err = -EMSGSIZE;
1999 ++ goto out_unlock;
2000 + }
2001 +
2002 + skb->protocol = proto;
2003 +@@ -2115,6 +2121,15 @@ static bool ll_header_truncated(const struct net_device *dev, int len)
2004 + return false;
2005 + }
2006 +
2007 ++static void tpacket_set_protocol(const struct net_device *dev,
2008 ++ struct sk_buff *skb)
2009 ++{
2010 ++ if (dev->type == ARPHRD_ETHER) {
2011 ++ skb_reset_mac_header(skb);
2012 ++ skb->protocol = eth_hdr(skb)->h_proto;
2013 ++ }
2014 ++}
2015 ++
2016 + static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2017 + void *frame, struct net_device *dev, int size_max,
2018 + __be16 proto, unsigned char *addr, int hlen)
2019 +@@ -2151,8 +2166,6 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2020 + skb_reserve(skb, hlen);
2021 + skb_reset_network_header(skb);
2022 +
2023 +- if (!packet_use_direct_xmit(po))
2024 +- skb_probe_transport_header(skb, 0);
2025 + if (unlikely(po->tp_tx_has_off)) {
2026 + int off_min, off_max, off;
2027 + off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2028 +@@ -2198,6 +2211,8 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2029 + dev->hard_header_len);
2030 + if (unlikely(err))
2031 + return err;
2032 ++ if (!skb->protocol)
2033 ++ tpacket_set_protocol(dev, skb);
2034 +
2035 + data += dev->hard_header_len;
2036 + to_write -= dev->hard_header_len;
2037 +@@ -2232,6 +2247,8 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2038 + len = ((to_write > len_max) ? len_max : to_write);
2039 + }
2040 +
2041 ++ skb_probe_transport_header(skb, 0);
2042 ++
2043 + return tp_len;
2044 + }
2045 +
2046 +@@ -2276,12 +2293,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2047 + if (unlikely(!(dev->flags & IFF_UP)))
2048 + goto out_put;
2049 +
2050 +- reserve = dev->hard_header_len + VLAN_HLEN;
2051 ++ if (po->sk.sk_socket->type == SOCK_RAW)
2052 ++ reserve = dev->hard_header_len;
2053 + size_max = po->tx_ring.frame_size
2054 + - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2055 +
2056 +- if (size_max > dev->mtu + reserve)
2057 +- size_max = dev->mtu + reserve;
2058 ++ if (size_max > dev->mtu + reserve + VLAN_HLEN)
2059 ++ size_max = dev->mtu + reserve + VLAN_HLEN;
2060 +
2061 + do {
2062 + ph = packet_current_frame(po, &po->tx_ring,
2063 +@@ -2308,18 +2326,10 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2064 + tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
2065 + addr, hlen);
2066 + if (likely(tp_len >= 0) &&
2067 +- tp_len > dev->mtu + dev->hard_header_len) {
2068 +- struct ethhdr *ehdr;
2069 +- /* Earlier code assumed this would be a VLAN pkt,
2070 +- * double-check this now that we have the actual
2071 +- * packet in hand.
2072 +- */
2073 ++ tp_len > dev->mtu + reserve &&
2074 ++ !packet_extra_vlan_len_allowed(dev, skb))
2075 ++ tp_len = -EMSGSIZE;
2076 +
2077 +- skb_reset_mac_header(skb);
2078 +- ehdr = eth_hdr(skb);
2079 +- if (ehdr->h_proto != htons(ETH_P_8021Q))
2080 +- tp_len = -EMSGSIZE;
2081 +- }
2082 + if (unlikely(tp_len < 0)) {
2083 + if (po->tp_loss) {
2084 + __packet_set_status(po, ph,
2085 +@@ -2540,18 +2550,10 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2086 +
2087 + sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
2088 +
2089 +- if (!gso_type && (len > dev->mtu + reserve + extra_len)) {
2090 +- /* Earlier code assumed this would be a VLAN pkt,
2091 +- * double-check this now that we have the actual
2092 +- * packet in hand.
2093 +- */
2094 +- struct ethhdr *ehdr;
2095 +- skb_reset_mac_header(skb);
2096 +- ehdr = eth_hdr(skb);
2097 +- if (ehdr->h_proto != htons(ETH_P_8021Q)) {
2098 +- err = -EMSGSIZE;
2099 +- goto out_free;
2100 +- }
2101 ++ if (!gso_type && (len > dev->mtu + reserve + extra_len) &&
2102 ++ !packet_extra_vlan_len_allowed(dev, skb)) {
2103 ++ err = -EMSGSIZE;
2104 ++ goto out_free;
2105 + }
2106 +
2107 + skb->protocol = proto;
2108 +@@ -2582,8 +2584,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2109 + len += vnet_hdr_len;
2110 + }
2111 +
2112 +- if (!packet_use_direct_xmit(po))
2113 +- skb_probe_transport_header(skb, reserve);
2114 ++ skb_probe_transport_header(skb, reserve);
2115 ++
2116 + if (unlikely(extra_len == 4))
2117 + skb->no_fcs = 1;
2118 +
2119 +diff --git a/net/rds/connection.c b/net/rds/connection.c
2120 +index 9d66705f9d41..da6da57e5f36 100644
2121 +--- a/net/rds/connection.c
2122 ++++ b/net/rds/connection.c
2123 +@@ -187,12 +187,6 @@ new_conn:
2124 + }
2125 + }
2126 +
2127 +- if (trans == NULL) {
2128 +- kmem_cache_free(rds_conn_slab, conn);
2129 +- conn = ERR_PTR(-ENODEV);
2130 +- goto out;
2131 +- }
2132 +-
2133 + conn->c_trans = trans;
2134 +
2135 + ret = trans->conn_alloc(conn, gfp);
2136 +diff --git a/net/rds/send.c b/net/rds/send.c
2137 +index e9430f537f9c..7b30c0f3180d 100644
2138 +--- a/net/rds/send.c
2139 ++++ b/net/rds/send.c
2140 +@@ -986,11 +986,13 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
2141 + release_sock(sk);
2142 + }
2143 +
2144 +- /* racing with another thread binding seems ok here */
2145 ++ lock_sock(sk);
2146 + if (daddr == 0 || rs->rs_bound_addr == 0) {
2147 ++ release_sock(sk);
2148 + ret = -ENOTCONN; /* XXX not a great errno */
2149 + goto out;
2150 + }
2151 ++ release_sock(sk);
2152 +
2153 + /* size of rm including all sgs */
2154 + ret = rds_rm_size(msg, payload_len);
2155 +diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
2156 +index 1e1c89e51a11..d4b6f3682c14 100644
2157 +--- a/net/sched/sch_api.c
2158 ++++ b/net/sched/sch_api.c
2159 +@@ -253,7 +253,8 @@ int qdisc_set_default(const char *name)
2160 + }
2161 +
2162 + /* We know handle. Find qdisc among all qdisc's attached to device
2163 +- (root qdisc, all its children, children of children etc.)
2164 ++ * (root qdisc, all its children, children of children etc.)
2165 ++ * Note: caller either uses rtnl or rcu_read_lock()
2166 + */
2167 +
2168 + static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
2169 +@@ -264,7 +265,7 @@ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
2170 + root->handle == handle)
2171 + return root;
2172 +
2173 +- list_for_each_entry(q, &root->list, list) {
2174 ++ list_for_each_entry_rcu(q, &root->list, list) {
2175 + if (q->handle == handle)
2176 + return q;
2177 + }
2178 +@@ -277,15 +278,18 @@ void qdisc_list_add(struct Qdisc *q)
2179 + struct Qdisc *root = qdisc_dev(q)->qdisc;
2180 +
2181 + WARN_ON_ONCE(root == &noop_qdisc);
2182 +- list_add_tail(&q->list, &root->list);
2183 ++ ASSERT_RTNL();
2184 ++ list_add_tail_rcu(&q->list, &root->list);
2185 + }
2186 + }
2187 + EXPORT_SYMBOL(qdisc_list_add);
2188 +
2189 + void qdisc_list_del(struct Qdisc *q)
2190 + {
2191 +- if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS))
2192 +- list_del(&q->list);
2193 ++ if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
2194 ++ ASSERT_RTNL();
2195 ++ list_del_rcu(&q->list);
2196 ++ }
2197 + }
2198 + EXPORT_SYMBOL(qdisc_list_del);
2199 +
2200 +@@ -750,14 +754,18 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
2201 + if (n == 0)
2202 + return;
2203 + drops = max_t(int, n, 0);
2204 ++ rcu_read_lock();
2205 + while ((parentid = sch->parent)) {
2206 + if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
2207 +- return;
2208 ++ break;
2209 +
2210 ++ if (sch->flags & TCQ_F_NOPARENT)
2211 ++ break;
2212 ++ /* TODO: perform the search on a per txq basis */
2213 + sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
2214 + if (sch == NULL) {
2215 +- WARN_ON(parentid != TC_H_ROOT);
2216 +- return;
2217 ++ WARN_ON_ONCE(parentid != TC_H_ROOT);
2218 ++ break;
2219 + }
2220 + cops = sch->ops->cl_ops;
2221 + if (cops->qlen_notify) {
2222 +@@ -768,6 +776,7 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
2223 + sch->q.qlen -= n;
2224 + __qdisc_qstats_drop(sch, drops);
2225 + }
2226 ++ rcu_read_unlock();
2227 + }
2228 + EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
2229 +
2230 +@@ -941,7 +950,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
2231 + }
2232 + lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
2233 + if (!netif_is_multiqueue(dev))
2234 +- sch->flags |= TCQ_F_ONETXQUEUE;
2235 ++ sch->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
2236 + }
2237 +
2238 + sch->handle = handle;
2239 +diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
2240 +index 6efca30894aa..b453270be3fd 100644
2241 +--- a/net/sched/sch_generic.c
2242 ++++ b/net/sched/sch_generic.c
2243 +@@ -743,7 +743,7 @@ static void attach_one_default_qdisc(struct net_device *dev,
2244 + return;
2245 + }
2246 + if (!netif_is_multiqueue(dev))
2247 +- qdisc->flags |= TCQ_F_ONETXQUEUE;
2248 ++ qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
2249 + }
2250 + dev_queue->qdisc_sleeping = qdisc;
2251 + }
2252 +diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
2253 +index f3cbaecd283a..3e82f047caaf 100644
2254 +--- a/net/sched/sch_mq.c
2255 ++++ b/net/sched/sch_mq.c
2256 +@@ -63,7 +63,7 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt)
2257 + if (qdisc == NULL)
2258 + goto err;
2259 + priv->qdiscs[ntx] = qdisc;
2260 +- qdisc->flags |= TCQ_F_ONETXQUEUE;
2261 ++ qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
2262 + }
2263 +
2264 + sch->flags |= TCQ_F_MQROOT;
2265 +@@ -156,7 +156,7 @@ static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
2266 +
2267 + *old = dev_graft_qdisc(dev_queue, new);
2268 + if (new)
2269 +- new->flags |= TCQ_F_ONETXQUEUE;
2270 ++ new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
2271 + if (dev->flags & IFF_UP)
2272 + dev_activate(dev);
2273 + return 0;
2274 +diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
2275 +index 3811a745452c..ad70ecf57ce7 100644
2276 +--- a/net/sched/sch_mqprio.c
2277 ++++ b/net/sched/sch_mqprio.c
2278 +@@ -132,7 +132,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
2279 + goto err;
2280 + }
2281 + priv->qdiscs[i] = qdisc;
2282 +- qdisc->flags |= TCQ_F_ONETXQUEUE;
2283 ++ qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
2284 + }
2285 +
2286 + /* If the mqprio options indicate that hardware should own
2287 +@@ -209,7 +209,7 @@ static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
2288 + *old = dev_graft_qdisc(dev_queue, new);
2289 +
2290 + if (new)
2291 +- new->flags |= TCQ_F_ONETXQUEUE;
2292 ++ new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
2293 +
2294 + if (dev->flags & IFF_UP)
2295 + dev_activate(dev);
2296 +diff --git a/net/sctp/auth.c b/net/sctp/auth.c
2297 +index 4f15b7d730e1..1543e39f47c3 100644
2298 +--- a/net/sctp/auth.c
2299 ++++ b/net/sctp/auth.c
2300 +@@ -809,8 +809,8 @@ int sctp_auth_ep_set_hmacs(struct sctp_endpoint *ep,
2301 + if (!has_sha1)
2302 + return -EINVAL;
2303 +
2304 +- memcpy(ep->auth_hmacs_list->hmac_ids, &hmacs->shmac_idents[0],
2305 +- hmacs->shmac_num_idents * sizeof(__u16));
2306 ++ for (i = 0; i < hmacs->shmac_num_idents; i++)
2307 ++ ep->auth_hmacs_list->hmac_ids[i] = htons(hmacs->shmac_idents[i]);
2308 + ep->auth_hmacs_list->param_hdr.length = htons(sizeof(sctp_paramhdr_t) +
2309 + hmacs->shmac_num_idents * sizeof(__u16));
2310 + return 0;
2311 +diff --git a/net/sctp/socket.c b/net/sctp/socket.c
2312 +index 5f6c4e61325b..66d796075050 100644
2313 +--- a/net/sctp/socket.c
2314 ++++ b/net/sctp/socket.c
2315 +@@ -7387,6 +7387,13 @@ struct proto sctp_prot = {
2316 +
2317 + #if IS_ENABLED(CONFIG_IPV6)
2318 +
2319 ++#include <net/transp_v6.h>
2320 ++static void sctp_v6_destroy_sock(struct sock *sk)
2321 ++{
2322 ++ sctp_destroy_sock(sk);
2323 ++ inet6_destroy_sock(sk);
2324 ++}
2325 ++
2326 + struct proto sctpv6_prot = {
2327 + .name = "SCTPv6",
2328 + .owner = THIS_MODULE,
2329 +@@ -7396,7 +7403,7 @@ struct proto sctpv6_prot = {
2330 + .accept = sctp_accept,
2331 + .ioctl = sctp_ioctl,
2332 + .init = sctp_init_sock,
2333 +- .destroy = sctp_destroy_sock,
2334 ++ .destroy = sctp_v6_destroy_sock,
2335 + .shutdown = sctp_shutdown,
2336 + .setsockopt = sctp_setsockopt,
2337 + .getsockopt = sctp_getsockopt,
2338 +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
2339 +index 76e66695621c..1975fd8d1c10 100644
2340 +--- a/net/unix/af_unix.c
2341 ++++ b/net/unix/af_unix.c
2342 +@@ -316,6 +316,118 @@ found:
2343 + return s;
2344 + }
2345 +
2346 ++/* Support code for asymmetrically connected dgram sockets
2347 ++ *
2348 ++ * If a datagram socket is connected to a socket not itself connected
2349 ++ * to the first socket (eg, /dev/log), clients may only enqueue more
2350 ++ * messages if the present receive queue of the server socket is not
2351 ++ * "too large". This means there's a second writeability condition
2352 ++ * poll and sendmsg need to test. The dgram recv code will do a wake
2353 ++ * up on the peer_wait wait queue of a socket upon reception of a
2354 ++ * datagram which needs to be propagated to sleeping would-be writers
2355 ++ * since these might not have sent anything so far. This can't be
2356 ++ * accomplished via poll_wait because the lifetime of the server
2357 ++ * socket might be less than that of its clients if these break their
2358 ++ * association with it or if the server socket is closed while clients
2359 ++ * are still connected to it and there's no way to inform "a polling
2360 ++ * implementation" that it should let go of a certain wait queue
2361 ++ *
2362 ++ * In order to propagate a wake up, a wait_queue_t of the client
2363 ++ * socket is enqueued on the peer_wait queue of the server socket
2364 ++ * whose wake function does a wake_up on the ordinary client socket
2365 ++ * wait queue. This connection is established whenever a write (or
2366 ++ * poll for write) hit the flow control condition and broken when the
2367 ++ * association to the server socket is dissolved or after a wake up
2368 ++ * was relayed.
2369 ++ */
2370 ++
2371 ++static int unix_dgram_peer_wake_relay(wait_queue_t *q, unsigned mode, int flags,
2372 ++ void *key)
2373 ++{
2374 ++ struct unix_sock *u;
2375 ++ wait_queue_head_t *u_sleep;
2376 ++
2377 ++ u = container_of(q, struct unix_sock, peer_wake);
2378 ++
2379 ++ __remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
2380 ++ q);
2381 ++ u->peer_wake.private = NULL;
2382 ++
2383 ++ /* relaying can only happen while the wq still exists */
2384 ++ u_sleep = sk_sleep(&u->sk);
2385 ++ if (u_sleep)
2386 ++ wake_up_interruptible_poll(u_sleep, key);
2387 ++
2388 ++ return 0;
2389 ++}
2390 ++
2391 ++static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
2392 ++{
2393 ++ struct unix_sock *u, *u_other;
2394 ++ int rc;
2395 ++
2396 ++ u = unix_sk(sk);
2397 ++ u_other = unix_sk(other);
2398 ++ rc = 0;
2399 ++ spin_lock(&u_other->peer_wait.lock);
2400 ++
2401 ++ if (!u->peer_wake.private) {
2402 ++ u->peer_wake.private = other;
2403 ++ __add_wait_queue(&u_other->peer_wait, &u->peer_wake);
2404 ++
2405 ++ rc = 1;
2406 ++ }
2407 ++
2408 ++ spin_unlock(&u_other->peer_wait.lock);
2409 ++ return rc;
2410 ++}
2411 ++
2412 ++static void unix_dgram_peer_wake_disconnect(struct sock *sk,
2413 ++ struct sock *other)
2414 ++{
2415 ++ struct unix_sock *u, *u_other;
2416 ++
2417 ++ u = unix_sk(sk);
2418 ++ u_other = unix_sk(other);
2419 ++ spin_lock(&u_other->peer_wait.lock);
2420 ++
2421 ++ if (u->peer_wake.private == other) {
2422 ++ __remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
2423 ++ u->peer_wake.private = NULL;
2424 ++ }
2425 ++
2426 ++ spin_unlock(&u_other->peer_wait.lock);
2427 ++}
2428 ++
2429 ++static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
2430 ++ struct sock *other)
2431 ++{
2432 ++ unix_dgram_peer_wake_disconnect(sk, other);
2433 ++ wake_up_interruptible_poll(sk_sleep(sk),
2434 ++ POLLOUT |
2435 ++ POLLWRNORM |
2436 ++ POLLWRBAND);
2437 ++}
2438 ++
2439 ++/* preconditions:
2440 ++ * - unix_peer(sk) == other
2441 ++ * - association is stable
2442 ++ */
2443 ++static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
2444 ++{
2445 ++ int connected;
2446 ++
2447 ++ connected = unix_dgram_peer_wake_connect(sk, other);
2448 ++
2449 ++ if (unix_recvq_full(other))
2450 ++ return 1;
2451 ++
2452 ++ if (connected)
2453 ++ unix_dgram_peer_wake_disconnect(sk, other);
2454 ++
2455 ++ return 0;
2456 ++}
2457 ++
2458 + static inline int unix_writable(struct sock *sk)
2459 + {
2460 + return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
2461 +@@ -420,6 +532,8 @@ static void unix_release_sock(struct sock *sk, int embrion)
2462 + skpair->sk_state_change(skpair);
2463 + sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
2464 + }
2465 ++
2466 ++ unix_dgram_peer_wake_disconnect(sk, skpair);
2467 + sock_put(skpair); /* It may now die */
2468 + unix_peer(sk) = NULL;
2469 + }
2470 +@@ -648,6 +762,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock)
2471 + INIT_LIST_HEAD(&u->link);
2472 + mutex_init(&u->readlock); /* single task reading lock */
2473 + init_waitqueue_head(&u->peer_wait);
2474 ++ init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
2475 + unix_insert_socket(unix_sockets_unbound(sk), sk);
2476 + out:
2477 + if (sk == NULL)
2478 +@@ -1015,6 +1130,8 @@ restart:
2479 + if (unix_peer(sk)) {
2480 + struct sock *old_peer = unix_peer(sk);
2481 + unix_peer(sk) = other;
2482 ++ unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
2483 ++
2484 + unix_state_double_unlock(sk, other);
2485 +
2486 + if (other != old_peer)
2487 +@@ -1453,6 +1570,7 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
2488 + struct scm_cookie scm;
2489 + int max_level;
2490 + int data_len = 0;
2491 ++ int sk_locked;
2492 +
2493 + wait_for_unix_gc();
2494 + err = scm_send(sock, msg, &scm, false);
2495 +@@ -1532,12 +1650,14 @@ restart:
2496 + goto out_free;
2497 + }
2498 +
2499 ++ sk_locked = 0;
2500 + unix_state_lock(other);
2501 ++restart_locked:
2502 + err = -EPERM;
2503 + if (!unix_may_send(sk, other))
2504 + goto out_unlock;
2505 +
2506 +- if (sock_flag(other, SOCK_DEAD)) {
2507 ++ if (unlikely(sock_flag(other, SOCK_DEAD))) {
2508 + /*
2509 + * Check with 1003.1g - what should
2510 + * datagram error
2511 +@@ -1545,10 +1665,14 @@ restart:
2512 + unix_state_unlock(other);
2513 + sock_put(other);
2514 +
2515 ++ if (!sk_locked)
2516 ++ unix_state_lock(sk);
2517 ++
2518 + err = 0;
2519 +- unix_state_lock(sk);
2520 + if (unix_peer(sk) == other) {
2521 + unix_peer(sk) = NULL;
2522 ++ unix_dgram_peer_wake_disconnect_wakeup(sk, other);
2523 ++
2524 + unix_state_unlock(sk);
2525 +
2526 + unix_dgram_disconnected(sk, other);
2527 +@@ -1574,21 +1698,38 @@ restart:
2528 + goto out_unlock;
2529 + }
2530 +
2531 +- if (unix_peer(other) != sk && unix_recvq_full(other)) {
2532 +- if (!timeo) {
2533 +- err = -EAGAIN;
2534 +- goto out_unlock;
2535 ++ if (unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
2536 ++ if (timeo) {
2537 ++ timeo = unix_wait_for_peer(other, timeo);
2538 ++
2539 ++ err = sock_intr_errno(timeo);
2540 ++ if (signal_pending(current))
2541 ++ goto out_free;
2542 ++
2543 ++ goto restart;
2544 + }
2545 +
2546 +- timeo = unix_wait_for_peer(other, timeo);
2547 ++ if (!sk_locked) {
2548 ++ unix_state_unlock(other);
2549 ++ unix_state_double_lock(sk, other);
2550 ++ }
2551 +
2552 +- err = sock_intr_errno(timeo);
2553 +- if (signal_pending(current))
2554 +- goto out_free;
2555 ++ if (unix_peer(sk) != other ||
2556 ++ unix_dgram_peer_wake_me(sk, other)) {
2557 ++ err = -EAGAIN;
2558 ++ sk_locked = 1;
2559 ++ goto out_unlock;
2560 ++ }
2561 +
2562 +- goto restart;
2563 ++ if (!sk_locked) {
2564 ++ sk_locked = 1;
2565 ++ goto restart_locked;
2566 ++ }
2567 + }
2568 +
2569 ++ if (unlikely(sk_locked))
2570 ++ unix_state_unlock(sk);
2571 ++
2572 + if (sock_flag(other, SOCK_RCVTSTAMP))
2573 + __net_timestamp(skb);
2574 + maybe_add_creds(skb, sock, other);
2575 +@@ -1602,6 +1743,8 @@ restart:
2576 + return len;
2577 +
2578 + out_unlock:
2579 ++ if (sk_locked)
2580 ++ unix_state_unlock(sk);
2581 + unix_state_unlock(other);
2582 + out_free:
2583 + kfree_skb(skb);
2584 +@@ -2245,14 +2388,16 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
2585 + return mask;
2586 +
2587 + writable = unix_writable(sk);
2588 +- other = unix_peer_get(sk);
2589 +- if (other) {
2590 +- if (unix_peer(other) != sk) {
2591 +- sock_poll_wait(file, &unix_sk(other)->peer_wait, wait);
2592 +- if (unix_recvq_full(other))
2593 +- writable = 0;
2594 +- }
2595 +- sock_put(other);
2596 ++ if (writable) {
2597 ++ unix_state_lock(sk);
2598 ++
2599 ++ other = unix_peer(sk);
2600 ++ if (other && unix_peer(other) != sk &&
2601 ++ unix_recvq_full(other) &&
2602 ++ unix_dgram_peer_wake_me(sk, other))
2603 ++ writable = 0;
2604 ++
2605 ++ unix_state_unlock(sk);
2606 + }
2607 +
2608 + if (writable)
2609 +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
2610 +index 225b78b4ef12..d02eccd51f6e 100644
2611 +--- a/sound/pci/hda/patch_hdmi.c
2612 ++++ b/sound/pci/hda/patch_hdmi.c
2613 +@@ -48,8 +48,9 @@ MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info");
2614 + #define is_haswell(codec) ((codec)->core.vendor_id == 0x80862807)
2615 + #define is_broadwell(codec) ((codec)->core.vendor_id == 0x80862808)
2616 + #define is_skylake(codec) ((codec)->core.vendor_id == 0x80862809)
2617 ++#define is_broxton(codec) ((codec)->core.vendor_id == 0x8086280a)
2618 + #define is_haswell_plus(codec) (is_haswell(codec) || is_broadwell(codec) \
2619 +- || is_skylake(codec))
2620 ++ || is_skylake(codec) || is_broxton(codec))
2621 +
2622 + #define is_valleyview(codec) ((codec)->core.vendor_id == 0x80862882)
2623 + #define is_cherryview(codec) ((codec)->core.vendor_id == 0x80862883)
2624 +diff --git a/tools/net/Makefile b/tools/net/Makefile
2625 +index ee577ea03ba5..ddf888010652 100644
2626 +--- a/tools/net/Makefile
2627 ++++ b/tools/net/Makefile
2628 +@@ -4,6 +4,9 @@ CC = gcc
2629 + LEX = flex
2630 + YACC = bison
2631 +
2632 ++CFLAGS += -Wall -O2
2633 ++CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include
2634 ++
2635 + %.yacc.c: %.y
2636 + $(YACC) -o $@ -d $<
2637 +
2638 +@@ -12,15 +15,13 @@ YACC = bison
2639 +
2640 + all : bpf_jit_disasm bpf_dbg bpf_asm
2641 +
2642 +-bpf_jit_disasm : CFLAGS = -Wall -O2 -DPACKAGE='bpf_jit_disasm'
2643 ++bpf_jit_disasm : CFLAGS += -DPACKAGE='bpf_jit_disasm'
2644 + bpf_jit_disasm : LDLIBS = -lopcodes -lbfd -ldl
2645 + bpf_jit_disasm : bpf_jit_disasm.o
2646 +
2647 +-bpf_dbg : CFLAGS = -Wall -O2
2648 + bpf_dbg : LDLIBS = -lreadline
2649 + bpf_dbg : bpf_dbg.o
2650 +
2651 +-bpf_asm : CFLAGS = -Wall -O2 -I.
2652 + bpf_asm : LDLIBS =
2653 + bpf_asm : bpf_asm.o bpf_exp.yacc.o bpf_exp.lex.o
2654 + bpf_exp.lex.o : bpf_exp.yacc.c