From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.3 commit in: /
Date: Tue, 15 Dec 2015 11:14:21 +0000
Message-Id: 1450178037.adfe38f2fe47d59f83fb2135810d41e997022b61.mpagano@gentoo
commit: adfe38f2fe47d59f83fb2135810d41e997022b61
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Dec 15 11:13:57 2015 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Dec 15 11:13:57 2015 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=adfe38f2

Linux patch 4.3.3

 0000_README            |    4 +
 1002_linux-4.3.3.patch | 4424 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4428 insertions(+)
diff --git a/0000_README b/0000_README
index 5fc79da..7b7e0b4 100644
--- a/0000_README
+++ b/0000_README
@@ -51,6 +51,10 @@ Patch: 1001_linux-4.3.2.patch
 From: http://www.kernel.org
 Desc: Linux 4.3.2
 
+Patch: 1002_linux-4.3.3.patch
+From: http://www.kernel.org
+Desc: Linux 4.3.3
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.
 
diff --git a/1002_linux-4.3.3.patch b/1002_linux-4.3.3.patch
new file mode 100644
index 0000000..7a2500e
--- /dev/null
+++ b/1002_linux-4.3.3.patch
@@ -0,0 +1,4424 @@
+diff --git a/Makefile b/Makefile
+index 1a4953b3e10f..2070d16bb5a4 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 3
+-SUBLEVEL = 2
++SUBLEVEL = 3
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+
+diff --git a/block/blk-merge.c b/block/blk-merge.c
+index c4e9c37f3e38..0e5f4fc12449 100644
+--- a/block/blk-merge.c
++++ b/block/blk-merge.c
+@@ -91,7 +91,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
+
+ seg_size += bv.bv_len;
+ bvprv = bv;
+- bvprvp = &bv;
++ bvprvp = &bvprv;
+ sectors += bv.bv_len >> 9;
+ continue;
+ }
+@@ -101,7 +101,7 @@ new_segment:
+
+ nsegs++;
+ bvprv = bv;
+- bvprvp = &bv;
++ bvprvp = &bvprv;
+ seg_size = bv.bv_len;
+ sectors += bv.bv_len >> 9;
+ }
+diff --git a/certs/.gitignore b/certs/.gitignore
+new file mode 100644
+index 000000000000..f51aea4a71ec
+--- /dev/null
++++ b/certs/.gitignore
+@@ -0,0 +1,4 @@
++#
++# Generated files
++#
++x509_certificate_list
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index 128e7df5b807..8630a77ea462 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -3444,6 +3444,7 @@ static void rbd_queue_workfn(struct work_struct *work)
+ goto err_rq;
+ }
+ img_request->rq = rq;
++ snapc = NULL; /* img_request consumes a ref */
+
+ if (op_type == OBJ_OP_DISCARD)
+ result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA,
+diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
+index f51d376d10ba..c2f5117fd8cb 100644
+--- a/drivers/firewire/ohci.c
++++ b/drivers/firewire/ohci.c
+@@ -3675,6 +3675,11 @@ static int pci_probe(struct pci_dev *dev,
+
+ reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
+ ohci->it_context_support = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
++ /* JMicron JMB38x often shows 0 at first read, just ignore it */
++ if (!ohci->it_context_support) {
++ ohci_notice(ohci, "overriding IsoXmitIntMask\n");
++ ohci->it_context_support = 0xf;
++ }
+ reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
+ ohci->it_context_mask = ohci->it_context_support;
+ ohci->n_it = hweight32(ohci->it_context_mask);
+diff --git a/drivers/media/pci/cobalt/Kconfig b/drivers/media/pci/cobalt/Kconfig
+index 1f88ccc174da..a01f0cc745cc 100644
+--- a/drivers/media/pci/cobalt/Kconfig
++++ b/drivers/media/pci/cobalt/Kconfig
+@@ -1,6 +1,6 @@
+ config VIDEO_COBALT
+ tristate "Cisco Cobalt support"
+- depends on VIDEO_V4L2 && I2C && MEDIA_CONTROLLER
++ depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
+ depends on PCI_MSI && MTD_COMPLEX_MAPPINGS
+ depends on GPIOLIB || COMPILE_TEST
+ depends on SND
+diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+index a9377727c11c..7f709cbdcd87 100644
+--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
++++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+@@ -1583,8 +1583,14 @@ err_disable_device:
+ static void nicvf_remove(struct pci_dev *pdev)
+ {
+ struct net_device *netdev = pci_get_drvdata(pdev);
+- struct nicvf *nic = netdev_priv(netdev);
+- struct net_device *pnetdev = nic->pnicvf->netdev;
++ struct nicvf *nic;
++ struct net_device *pnetdev;
++
++ if (!netdev)
++ return;
++
++ nic = netdev_priv(netdev);
++ pnetdev = nic->pnicvf->netdev;
+
+ /* Check if this Qset is assigned to different VF.
+ * If yes, clean primary and all secondary Qsets.
+diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+index 731423ca575d..8bead97373ab 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
++++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+@@ -4934,26 +4934,41 @@ static void rem_slave_counters(struct mlx4_dev *dev, int slave)
+ struct res_counter *counter;
+ struct res_counter *tmp;
+ int err;
+- int index;
++ int *counters_arr = NULL;
++ int i, j;
+
+ err = move_all_busy(dev, slave, RES_COUNTER);
+ if (err)
+ mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
+ slave);
+
+- spin_lock_irq(mlx4_tlock(dev));
+- list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
+- if (counter->com.owner == slave) {
+- index = counter->com.res_id;
+- rb_erase(&counter->com.node,
+- &tracker->res_tree[RES_COUNTER]);
+- list_del(&counter->com.list);
+- kfree(counter);
+- __mlx4_counter_free(dev, index);
++ counters_arr = kmalloc_array(dev->caps.max_counters,
++ sizeof(*counters_arr), GFP_KERNEL);
++ if (!counters_arr)
++ return;
++
++ do {
++ i = 0;
++ j = 0;
++ spin_lock_irq(mlx4_tlock(dev));
++ list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
++ if (counter->com.owner == slave) {
++ counters_arr[i++] = counter->com.res_id;
++ rb_erase(&counter->com.node,
++ &tracker->res_tree[RES_COUNTER]);
++ list_del(&counter->com.list);
++ kfree(counter);
++ }
++ }
++ spin_unlock_irq(mlx4_tlock(dev));
++
++ while (j < i) {
++ __mlx4_counter_free(dev, counters_arr[j++]);
+ mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
+ }
+- }
+- spin_unlock_irq(mlx4_tlock(dev));
++ } while (i);
++
++ kfree(counters_arr);
+ }
+
+ static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 59874d666cff..443632df2010 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -1332,6 +1332,42 @@ static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt)
+ return err;
+ }
+
++static int mlx5e_refresh_tir_self_loopback_enable(struct mlx5_core_dev *mdev,
++ u32 tirn)
++{
++ void *in;
++ int inlen;
++ int err;
++
++ inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
++ in = mlx5_vzalloc(inlen);
++ if (!in)
++ return -ENOMEM;
++
++ MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
++
++ err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
++
++ kvfree(in);
++
++ return err;
++}
++
++static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv)
++{
++ int err;
++ int i;
++
++ for (i = 0; i < MLX5E_NUM_TT; i++) {
++ err = mlx5e_refresh_tir_self_loopback_enable(priv->mdev,
++ priv->tirn[i]);
++ if (err)
++ return err;
++ }
++
++ return 0;
++}
++
+ static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
+ {
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+@@ -1367,13 +1403,20 @@ int mlx5e_open_locked(struct net_device *netdev)
+
+ err = mlx5e_set_dev_port_mtu(netdev);
+ if (err)
+- return err;
++ goto err_clear_state_opened_flag;
+
+ err = mlx5e_open_channels(priv);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
+ __func__, err);
+- return err;
++ goto err_clear_state_opened_flag;
++ }
++
++ err = mlx5e_refresh_tirs_self_loopback_enable(priv);
++ if (err) {
++ netdev_err(netdev, "%s: mlx5e_refresh_tirs_self_loopback_enable failed, %d\n",
++ __func__, err);
++ goto err_close_channels;
+ }
+
+ mlx5e_update_carrier(priv);
+@@ -1382,6 +1425,12 @@ int mlx5e_open_locked(struct net_device *netdev)
+ schedule_delayed_work(&priv->update_stats_work, 0);
+
+ return 0;
++
++err_close_channels:
++ mlx5e_close_channels(priv);
++err_clear_state_opened_flag:
++ clear_bit(MLX5E_STATE_OPENED, &priv->state);
++ return err;
+ }
+
+ static int mlx5e_open(struct net_device *netdev)
+@@ -1899,6 +1948,9 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
+ "Not creating net device, some required device capabilities are missing\n");
+ return -ENOTSUPP;
+ }
++ if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
++ mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
++
+ return 0;
+ }
+
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index b4f21232019a..79ef799f88ab 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -7429,15 +7429,15 @@ process_pkt:
+
+ rtl8169_rx_vlan_tag(desc, skb);
+
++ if (skb->pkt_type == PACKET_MULTICAST)
++ dev->stats.multicast++;
++
+ napi_gro_receive(&tp->napi, skb);
+
+ u64_stats_update_begin(&tp->rx_stats.syncp);
+ tp->rx_stats.packets++;
+ tp->rx_stats.bytes += pkt_size;
+ u64_stats_update_end(&tp->rx_stats.syncp);
+-
+- if (skb->pkt_type == PACKET_MULTICAST)
+- dev->stats.multicast++;
+ }
+ release_descriptor:
+ desc->opts2 = 0;
+diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
+index 9c71295f2fef..85e640440bd9 100644
+--- a/drivers/net/phy/broadcom.c
++++ b/drivers/net/phy/broadcom.c
+@@ -675,7 +675,7 @@ static struct mdio_device_id __maybe_unused broadcom_tbl[] = {
+ { PHY_ID_BCM5461, 0xfffffff0 },
+ { PHY_ID_BCM54616S, 0xfffffff0 },
+ { PHY_ID_BCM5464, 0xfffffff0 },
+- { PHY_ID_BCM5482, 0xfffffff0 },
++ { PHY_ID_BCM5481, 0xfffffff0 },
+ { PHY_ID_BCM5482, 0xfffffff0 },
+ { PHY_ID_BCM50610, 0xfffffff0 },
+ { PHY_ID_BCM50610M, 0xfffffff0 },
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 2a7c1be23c4f..66e0853d1680 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -775,6 +775,7 @@ static const struct usb_device_id products[] = {
+ {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
+ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
+ {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */
++ {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */
+ {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */
+ {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */
+ {QMI_FIXED_INTF(0x0b3c, 0xc002, 4)}, /* Olivetti Olicard 140 */
+diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
+index 488c6f50df73..c9e309cd9d82 100644
+--- a/drivers/net/vrf.c
++++ b/drivers/net/vrf.c
+@@ -581,7 +581,6 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev,
+ {
+ struct net_vrf *vrf = netdev_priv(dev);
+ struct net_vrf_dev *vrf_ptr;
+- int err;
+
+ if (!data || !data[IFLA_VRF_TABLE])
+ return -EINVAL;
+@@ -590,26 +589,16 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev,
+
+ dev->priv_flags |= IFF_VRF_MASTER;
+
+- err = -ENOMEM;
+ vrf_ptr = kmalloc(sizeof(*dev->vrf_ptr), GFP_KERNEL);
+ if (!vrf_ptr)
+- goto out_fail;
++ return -ENOMEM;
+
+ vrf_ptr->ifindex = dev->ifindex;
+ vrf_ptr->tb_id = vrf->tb_id;
+
+- err = register_netdevice(dev);
+- if (err < 0)
+- goto out_fail;
+-
+ rcu_assign_pointer(dev->vrf_ptr, vrf_ptr);
+
+- return 0;
+-
+-out_fail:
+- kfree(vrf_ptr);
+- free_netdev(dev);
+- return err;
++ return register_netdev(dev);
+ }
+
+ static size_t vrf_nl_getsize(const struct net_device *dev)
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index 938efe33be80..94eea1f43280 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -3398,7 +3398,7 @@ int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
+ int btrfs_free_extent(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
+- u64 owner, u64 offset, int no_quota);
++ u64 owner, u64 offset);
+
+ int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len,
+ int delalloc);
+@@ -3411,7 +3411,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
+ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ u64 bytenr, u64 num_bytes, u64 parent,
+- u64 root_objectid, u64 owner, u64 offset, int no_quota);
++ u64 root_objectid, u64 owner, u64 offset);
+
+ int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root);
+diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
+index ac3e81da6d4e..7832031fef68 100644
+--- a/fs/btrfs/delayed-ref.c
++++ b/fs/btrfs/delayed-ref.c
+@@ -197,6 +197,119 @@ static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
+ trans->delayed_ref_updates--;
+ }
+
++static bool merge_ref(struct btrfs_trans_handle *trans,
++ struct btrfs_delayed_ref_root *delayed_refs,
++ struct btrfs_delayed_ref_head *head,
++ struct btrfs_delayed_ref_node *ref,
++ u64 seq)
++{
++ struct btrfs_delayed_ref_node *next;
++ bool done = false;
++
++ next = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
++ list);
++ while (!done && &next->list != &head->ref_list) {
++ int mod;
++ struct btrfs_delayed_ref_node *next2;
++
++ next2 = list_next_entry(next, list);
++
++ if (next == ref)
++ goto next;
++
++ if (seq && next->seq >= seq)
++ goto next;
++
++ if (next->type != ref->type)
++ goto next;
++
++ if ((ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
++ ref->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
++ comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref),
++ btrfs_delayed_node_to_tree_ref(next),
++ ref->type))
++ goto next;
++ if ((ref->type == BTRFS_EXTENT_DATA_REF_KEY ||
++ ref->type == BTRFS_SHARED_DATA_REF_KEY) &&
++ comp_data_refs(btrfs_delayed_node_to_data_ref(ref),
++ btrfs_delayed_node_to_data_ref(next)))
++ goto next;
++
++ if (ref->action == next->action) {
++ mod = next->ref_mod;
++ } else {
++ if (ref->ref_mod < next->ref_mod) {
++ swap(ref, next);
++ done = true;
++ }
++ mod = -next->ref_mod;
++ }
++
++ drop_delayed_ref(trans, delayed_refs, head, next);
++ ref->ref_mod += mod;
++ if (ref->ref_mod == 0) {
++ drop_delayed_ref(trans, delayed_refs, head, ref);
++ done = true;
++ } else {
++ /*
++ * Can't have multiples of the same ref on a tree block.
++ */
++ WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
++ ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
++ }
++next:
++ next = next2;
++ }
++
++ return done;
++}
++
++void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
++ struct btrfs_fs_info *fs_info,
++ struct btrfs_delayed_ref_root *delayed_refs,
++ struct btrfs_delayed_ref_head *head)
++{
++ struct btrfs_delayed_ref_node *ref;
++ u64 seq = 0;
++
++ assert_spin_locked(&head->lock);
++
++ if (list_empty(&head->ref_list))
++ return;
++
++ /* We don't have too many refs to merge for data. */
++ if (head->is_data)
++ return;
++
++ spin_lock(&fs_info->tree_mod_seq_lock);
++ if (!list_empty(&fs_info->tree_mod_seq_list)) {
++ struct seq_list *elem;
++
++ elem = list_first_entry(&fs_info->tree_mod_seq_list,
++ struct seq_list, list);
++ seq = elem->seq;
++ }
++ spin_unlock(&fs_info->tree_mod_seq_lock);
++
++ ref = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
++ list);
++ while (&ref->list != &head->ref_list) {
++ if (seq && ref->seq >= seq)
++ goto next;
++
++ if (merge_ref(trans, delayed_refs, head, ref, seq)) {
++ if (list_empty(&head->ref_list))
++ break;
++ ref = list_first_entry(&head->ref_list,
++ struct btrfs_delayed_ref_node,
++ list);
++ continue;
++ }
++next:
++ ref = list_next_entry(ref, list);
++ }
++}
++
+ int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_root *delayed_refs,
+ u64 seq)
+@@ -292,8 +405,7 @@ add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
+ exist = list_entry(href->ref_list.prev, struct btrfs_delayed_ref_node,
+ list);
+ /* No need to compare bytenr nor is_head */
+- if (exist->type != ref->type || exist->no_quota != ref->no_quota ||
+- exist->seq != ref->seq)
++ if (exist->type != ref->type || exist->seq != ref->seq)
+ goto add_tail;
+
+ if ((exist->type == BTRFS_TREE_BLOCK_REF_KEY ||
+@@ -524,7 +636,7 @@ add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_head *head_ref,
+ struct btrfs_delayed_ref_node *ref, u64 bytenr,
+ u64 num_bytes, u64 parent, u64 ref_root, int level,
+- int action, int no_quota)
++ int action)
+ {
+ struct btrfs_delayed_tree_ref *full_ref;
+ struct btrfs_delayed_ref_root *delayed_refs;
+@@ -546,7 +658,6 @@ add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
+ ref->action = action;
+ ref->is_head = 0;
+ ref->in_tree = 1;
+- ref->no_quota = no_quota;
+ ref->seq = seq;
+
+ full_ref = btrfs_delayed_node_to_tree_ref(ref);
+@@ -579,7 +690,7 @@ add_delayed_data_ref(struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_head *head_ref,
+ struct btrfs_delayed_ref_node *ref, u64 bytenr,
+ u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
+- u64 offset, int action, int no_quota)
++ u64 offset, int action)
+ {
+ struct btrfs_delayed_data_ref *full_ref;
+ struct btrfs_delayed_ref_root *delayed_refs;
+@@ -602,7 +713,6 @@ add_delayed_data_ref(struct btrfs_fs_info *fs_info,
+ ref->action = action;
+ ref->is_head = 0;
+ ref->in_tree = 1;
+- ref->no_quota = no_quota;
+ ref->seq = seq;
+
+ full_ref = btrfs_delayed_node_to_data_ref(ref);
+@@ -633,17 +743,13 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
+ struct btrfs_trans_handle *trans,
+ u64 bytenr, u64 num_bytes, u64 parent,
+ u64 ref_root, int level, int action,
+- struct btrfs_delayed_extent_op *extent_op,
+- int no_quota)
++ struct btrfs_delayed_extent_op *extent_op)
+ {
+ struct btrfs_delayed_tree_ref *ref;
+ struct btrfs_delayed_ref_head *head_ref;
+ struct btrfs_delayed_ref_root *delayed_refs;
+ struct btrfs_qgroup_extent_record *record = NULL;
+
+- if (!is_fstree(ref_root) || !fs_info->quota_enabled)
+- no_quota = 0;
+-
+ BUG_ON(extent_op && extent_op->is_data);
+ ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
+ if (!ref)
+@@ -672,8 +778,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
+ bytenr, num_bytes, action, 0);
+
+ add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
+- num_bytes, parent, ref_root, level, action,
+- no_quota);
++ num_bytes, parent, ref_root, level, action);
+ spin_unlock(&delayed_refs->lock);
+
+ return 0;
+@@ -694,17 +799,13 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
+ u64 bytenr, u64 num_bytes,
+ u64 parent, u64 ref_root,
+ u64 owner, u64 offset, int action,
+- struct btrfs_delayed_extent_op *extent_op,
+- int no_quota)
++ struct btrfs_delayed_extent_op *extent_op)
+ {
+ struct btrfs_delayed_data_ref *ref;
+ struct btrfs_delayed_ref_head *head_ref;
+ struct btrfs_delayed_ref_root *delayed_refs;
+ struct btrfs_qgroup_extent_record *record = NULL;
+
+- if (!is_fstree(ref_root) || !fs_info->quota_enabled)
+- no_quota = 0;
+-
+ BUG_ON(extent_op && !extent_op->is_data);
+ ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
+ if (!ref)
+@@ -740,7 +841,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
+
+ add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
+ num_bytes, parent, ref_root, owner, offset,
+- action, no_quota);
++ action);
+ spin_unlock(&delayed_refs->lock);
+
+ return 0;
+diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
+index 13fb5e6090fe..930887a4275f 100644
+--- a/fs/btrfs/delayed-ref.h
++++ b/fs/btrfs/delayed-ref.h
+@@ -68,7 +68,6 @@ struct btrfs_delayed_ref_node {
+
+ unsigned int action:8;
+ unsigned int type:8;
+- unsigned int no_quota:1;
+ /* is this node still in the rbtree? */
+ unsigned int is_head:1;
+ unsigned int in_tree:1;
+@@ -233,15 +232,13 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
+ struct btrfs_trans_handle *trans,
+ u64 bytenr, u64 num_bytes, u64 parent,
+ u64 ref_root, int level, int action,
+- struct btrfs_delayed_extent_op *extent_op,
+- int no_quota);
++ struct btrfs_delayed_extent_op *extent_op);
+ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
+ struct btrfs_trans_handle *trans,
+ u64 bytenr, u64 num_bytes,
+ u64 parent, u64 ref_root,
+ u64 owner, u64 offset, int action,
+- struct btrfs_delayed_extent_op *extent_op,
+- int no_quota);
++ struct btrfs_delayed_extent_op *extent_op);
+ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
+ struct btrfs_trans_handle *trans,
+ u64 bytenr, u64 num_bytes,
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 601d7d45d164..cadacf643bd0 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -95,8 +95,7 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ u64 parent, u64 root_objectid,
+ u64 flags, struct btrfs_disk_key *key,
+- int level, struct btrfs_key *ins,
+- int no_quota);
++ int level, struct btrfs_key *ins);
+ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
+ struct btrfs_root *extent_root, u64 flags,
+ int force);
+@@ -2009,8 +2008,7 @@ int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
+ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ u64 bytenr, u64 num_bytes, u64 parent,
+- u64 root_objectid, u64 owner, u64 offset,
+- int no_quota)
++ u64 root_objectid, u64 owner, u64 offset)
+ {
+ int ret;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+@@ -2022,12 +2020,12 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
+ ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
+ num_bytes,
+ parent, root_objectid, (int)owner,
+- BTRFS_ADD_DELAYED_REF, NULL, no_quota);
++ BTRFS_ADD_DELAYED_REF, NULL);
+ } else {
+ ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
+ num_bytes,
+ parent, root_objectid, owner, offset,
+- BTRFS_ADD_DELAYED_REF, NULL, no_quota);
++ BTRFS_ADD_DELAYED_REF, NULL);
+ }
+ return ret;
+ }
+@@ -2048,15 +2046,11 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
+ u64 num_bytes = node->num_bytes;
+ u64 refs;
+ int ret;
+- int no_quota = node->no_quota;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+- if (!is_fstree(root_objectid) || !root->fs_info->quota_enabled)
+- no_quota = 1;
+-
+ path->reada = 1;
+ path->leave_spinning = 1;
+ /* this will setup the path even if it fails to insert the back ref */
+@@ -2291,8 +2285,7 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
+ parent, ref_root,
+ extent_op->flags_to_set,
+ &extent_op->key,
+- ref->level, &ins,
+- node->no_quota);
++ ref->level, &ins);
+ } else if (node->action == BTRFS_ADD_DELAYED_REF) {
+ ret = __btrfs_inc_extent_ref(trans, root, node,
+ parent, ref_root,
+@@ -2433,7 +2426,21 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
+ }
+ }
+
++ /*
++ * We need to try and merge add/drops of the same ref since we
++ * can run into issues with relocate dropping the implicit ref
++ * and then it being added back again before the drop can
++ * finish. If we merged anything we need to re-loop so we can
++ * get a good ref.
++ * Or we can get node references of the same type that weren't
++ * merged when created due to bumps in the tree mod seq, and
++ * we need to merge them to prevent adding an inline extent
++ * backref before dropping it (triggering a BUG_ON at
++ * insert_inline_extent_backref()).
++ */
+ spin_lock(&locked_ref->lock);
++ btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
++ locked_ref);
+
+ /*
+ * locked_ref is the head node, so we have to go one
+@@ -3109,7 +3116,7 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
+ int level;
+ int ret = 0;
+ int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
+- u64, u64, u64, u64, u64, u64, int);
++ u64, u64, u64, u64, u64, u64);
+
+
+ if (btrfs_test_is_dummy_root(root))
+@@ -3150,15 +3157,14 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
+ key.offset -= btrfs_file_extent_offset(buf, fi);
+ ret = process_func(trans, root, bytenr, num_bytes,
+ parent, ref_root, key.objectid,
+- key.offset, 1);
++ key.offset);
+ if (ret)
+ goto fail;
+ } else {
+ bytenr = btrfs_node_blockptr(buf, i);
+ num_bytes = root->nodesize;
+ ret = process_func(trans, root, bytenr, num_bytes,
+- parent, ref_root, level - 1, 0,
+- 1);
++ parent, ref_root, level - 1, 0);
+ if (ret)
+ goto fail;
+ }
+@@ -6233,7 +6239,6 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
+ int extent_slot = 0;
+ int found_extent = 0;
+ int num_to_del = 1;
+- int no_quota = node->no_quota;
+ u32 item_size;
+ u64 refs;
+ u64 bytenr = node->bytenr;
+@@ -6242,9 +6247,6 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
+ bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
+ SKINNY_METADATA);
+
+- if (!info->quota_enabled || !is_fstree(root_objectid))
+- no_quota = 1;
+-
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+@@ -6570,7 +6572,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
+ buf->start, buf->len,
+ parent, root->root_key.objectid,
+ btrfs_header_level(buf),
+- BTRFS_DROP_DELAYED_REF, NULL, 0);
++ BTRFS_DROP_DELAYED_REF, NULL);
+ BUG_ON(ret); /* -ENOMEM */
+ }
+
+@@ -6618,7 +6620,7 @@ out:
+ /* Can return -ENOMEM */
+ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
+- u64 owner, u64 offset, int no_quota)
++ u64 owner, u64 offset)
+ {
+ int ret;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+@@ -6641,13 +6643,13 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
+ num_bytes,
+ parent, root_objectid, (int)owner,
+- BTRFS_DROP_DELAYED_REF, NULL, no_quota);
++ BTRFS_DROP_DELAYED_REF, NULL);
+ } else {
+ ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
+ num_bytes,
+ parent, root_objectid, owner,
+ offset, BTRFS_DROP_DELAYED_REF,
+- NULL, no_quota);
++ NULL);
+ }
+ return ret;
+ }
+@@ -7429,8 +7431,7 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ u64 parent, u64 root_objectid,
+ u64 flags, struct btrfs_disk_key *key,
+- int level, struct btrfs_key *ins,
+- int no_quota)
++ int level, struct btrfs_key *ins)
+ {
+ int ret;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+@@ -7520,7 +7521,7 @@ int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
+ ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
+ ins->offset, 0,
+ root_objectid, owner, offset,
+- BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
++ BTRFS_ADD_DELAYED_EXTENT, NULL);
+ return ret;
+ }
+
+@@ -7734,7 +7735,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
+ ins.objectid, ins.offset,
+ parent, root_objectid, level,
+ BTRFS_ADD_DELAYED_EXTENT,
+- extent_op, 0);
++ extent_op);
+ if (ret)
+ goto out_free_delayed;
+ }
+@@ -8282,7 +8283,7 @@ skip:
+ }
+ }
+ ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
+- root->root_key.objectid, level - 1, 0, 0);
++ root->root_key.objectid, level - 1, 0);
+ BUG_ON(ret); /* -ENOMEM */
+ }
+ btrfs_tree_unlock(next);
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 8c6f247ba81d..e27ea7ae7f26 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -756,8 +756,16 @@ next_slot:
+ }
+
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+- if (key.objectid > ino ||
+- key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
++
++ if (key.objectid > ino)
++ break;
++ if (WARN_ON_ONCE(key.objectid < ino) ||
++ key.type < BTRFS_EXTENT_DATA_KEY) {
++ ASSERT(del_nr == 0);
++ path->slots[0]++;
++ goto next_slot;
++ }
++ if (key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
+ break;
+
+ fi = btrfs_item_ptr(leaf, path->slots[0],
+@@ -776,8 +784,8 @@ next_slot:
+ btrfs_file_extent_inline_len(leaf,
+ path->slots[0], fi);
+ } else {
+- WARN_ON(1);
+- extent_end = search_start;
++ /* can't happen */
++ BUG();
+ }
+
+ /*
+@@ -847,7 +855,7 @@ next_slot:
+ disk_bytenr, num_bytes, 0,
+ root->root_key.objectid,
+ new_key.objectid,
+- start - extent_offset, 1);
++ start - extent_offset);
+ BUG_ON(ret); /* -ENOMEM */
+ }
+ key.offset = start;
+@@ -925,7 +933,7 @@ delete_extent_item:
+ disk_bytenr, num_bytes, 0,
+ root->root_key.objectid,
+ key.objectid, key.offset -
+- extent_offset, 0);
++ extent_offset);
+ BUG_ON(ret); /* -ENOMEM */
+ inode_sub_bytes(inode,
+ extent_end - key.offset);
+@@ -1204,7 +1212,7 @@ again:
+
+ ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
+ root->root_key.objectid,
+- ino, orig_offset, 1);
++ ino, orig_offset);
+ BUG_ON(ret); /* -ENOMEM */
+
+ if (split == start) {
+@@ -1231,7 +1239,7 @@ again:
+ del_nr++;
+ ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
+ 0, root->root_key.objectid,
+- ino, orig_offset, 0);
++ ino, orig_offset);
+ BUG_ON(ret); /* -ENOMEM */
+ }
+ other_start = 0;
+@@ -1248,7 +1256,7 @@ again:
+ del_nr++;
+ ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
+ 0, root->root_key.objectid,
+- ino, orig_offset, 0);
++ ino, orig_offset);
+ BUG_ON(ret); /* -ENOMEM */
+ }
+ if (del_nr == 0) {
+@@ -1868,8 +1876,13 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+ struct btrfs_log_ctx ctx;
+ int ret = 0;
+ bool full_sync = 0;
+- const u64 len = end - start + 1;
++ u64 len;
+
++ /*
++ * The range length can be represented by u64, we have to do the typecasts
++ * to avoid signed overflow if it's [0, LLONG_MAX] eg. from fsync()
++ */
++ len = (u64)end - (u64)start + 1;
+ trace_btrfs_sync_file(file, datasync);
+
+ /*
+@@ -2057,8 +2070,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+ }
+ }
+ if (!full_sync) {
+- ret = btrfs_wait_ordered_range(inode, start,
+- end - start + 1);
++ ret = btrfs_wait_ordered_range(inode, start, len);
+ if (ret) {
+ btrfs_end_transaction(trans, root);
+ goto out;
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 611b66d73e80..396e3d5c4e83 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -1294,8 +1294,14 @@ next_slot:
+ num_bytes = 0;
+ btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
+
+- if (found_key.objectid > ino ||
+- found_key.type > BTRFS_EXTENT_DATA_KEY ||
++ if (found_key.objectid > ino)
++ break;
++ if (WARN_ON_ONCE(found_key.objectid < ino) ||
++ found_key.type < BTRFS_EXTENT_DATA_KEY) {
++ path->slots[0]++;
++ goto next_slot;
++ }
++ if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
+ found_key.offset > end)
+ break;
+
+@@ -2573,7 +2579,7 @@ again:
+ ret = btrfs_inc_extent_ref(trans, root, new->bytenr,
+ new->disk_len, 0,
+ backref->root_id, backref->inum,
+- new->file_pos, 0); /* start - extent_offset */
++ new->file_pos); /* start - extent_offset */
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto out_free_path;
+@@ -4217,6 +4223,47 @@ static int truncate_space_check(struct btrfs_trans_handle *trans,
+
+ }
+
++static int truncate_inline_extent(struct inode *inode,
++ struct btrfs_path *path,
++ struct btrfs_key *found_key,
++ const u64 item_end,
++ const u64 new_size)
++{
++ struct extent_buffer *leaf = path->nodes[0];
++ int slot = path->slots[0];
++ struct btrfs_file_extent_item *fi;
++ u32 size = (u32)(new_size - found_key->offset);
++ struct btrfs_root *root = BTRFS_I(inode)->root;
++
++ fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
++
++ if (btrfs_file_extent_compression(leaf, fi) != BTRFS_COMPRESS_NONE) {
++ loff_t offset = new_size;
++ loff_t page_end = ALIGN(offset, PAGE_CACHE_SIZE);
++
++ /*
++ * Zero out the remaining of the last page of our inline extent,
++ * instead of directly truncating our inline extent here - that
++ * would be much more complex (decompressing all the data, then
++ * compressing the truncated data, which might be bigger than
++ * the size of the inline extent, resize the extent, etc).
++ * We release the path because to get the page we might need to
++ * read the extent item from disk (data not in the page cache).
++ */
++ btrfs_release_path(path);
++ return btrfs_truncate_page(inode, offset, page_end - offset, 0);
++ }
++
++ btrfs_set_file_extent_ram_bytes(leaf, fi, size);
++ size = btrfs_file_extent_calc_inline_size(size);
++ btrfs_truncate_item(root, path, size, 1);
++
++ if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
++ inode_sub_bytes(inode, item_end + 1 - new_size);
++
++ return 0;
++}
++
+ /*
+ * this can truncate away extent items, csum items and directory items.
+ * It starts at a high offset and removes keys until it can't find
+@@ -4411,27 +4458,40 @@ search_again:
+ * special encodings
+ */
+ if (!del_item &&
+- btrfs_file_extent_compression(leaf, fi) == 0 &&
+ btrfs_file_extent_encryption(leaf, fi) == 0 &&
+ btrfs_file_extent_other_encoding(leaf, fi) == 0) {
+- u32 size = new_size - found_key.offset;
+-
+- if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
+- inode_sub_bytes(inode, item_end + 1 -
+- new_size);
+
+ /*
+- * update the ram bytes to properly reflect
+- * the new size of our item
++ * Need to release path in order to truncate a
++ * compressed extent. So delete any accumulated
++ * extent items so far.
+ */
+- btrfs_set_file_extent_ram_bytes(leaf, fi, size);
+- size =
+- btrfs_file_extent_calc_inline_size(size);
+- btrfs_truncate_item(root, path, size, 1);
++ if (btrfs_file_extent_compression(leaf, fi) !=
++ BTRFS_COMPRESS_NONE && pending_del_nr) {
++ err = btrfs_del_items(trans, root, path,
++ pending_del_slot,
++ pending_del_nr);
++ if (err) {
++ btrfs_abort_transaction(trans,
++ root,
++ err);
++ goto error;
++ }
++ pending_del_nr = 0;
++ }
++
++ err = truncate_inline_extent(inode, path,
++ &found_key,
++ item_end,
++ new_size);
++ if (err) {
++ btrfs_abort_transaction(trans,
++ root, err);
++ goto error;
++ }
+ } else if (test_bit(BTRFS_ROOT_REF_COWS,
+ &root->state)) {
+- inode_sub_bytes(inode, item_end + 1 -
+- found_key.offset);
++ inode_sub_bytes(inode, item_end + 1 - new_size);
+ }
+ }
+delete:
+@@ -4461,7 +4521,7 @@ delete:
+ ret = btrfs_free_extent(trans, root, extent_start,
+ extent_num_bytes, 0,
+ btrfs_header_owner(leaf),
+- ino, extent_offset, 0);
++ ino, extent_offset);
+ BUG_ON(ret);
+ if (btrfs_should_throttle_delayed_refs(trans, root))
+ btrfs_async_run_delayed_refs(root,
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 8d20f3b1cab0..6548a36823bc 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -3203,41 +3203,6 @@ out:
+ return ret;
+ }
+
+-/* Helper to check and see if this root currently has a ref on the given disk
+- * bytenr. If it does then we need to update the quota for this root. This
+- * doesn't do anything if quotas aren't enabled.
+- */
+-static int check_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+- u64 disko)
+-{
+- struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
+- struct ulist *roots;
+- struct ulist_iterator uiter;
+- struct ulist_node *root_node = NULL;
+- int ret;
+-
+- if (!root->fs_info->quota_enabled)
+- return 1;
+-
+- btrfs_get_tree_mod_seq(root->fs_info, &tree_mod_seq_elem);
+- ret = btrfs_find_all_roots(trans, root->fs_info, disko,
+- tree_mod_seq_elem.seq, &roots);
+- if (ret < 0)
+- goto out;
+- ret = 0;
+- ULIST_ITER_INIT(&uiter);
+- while ((root_node = ulist_next(roots, &uiter))) {
+- if (root_node->val == root->objectid) {
+- ret = 1;
+- break;
+- }
+- }
+- ulist_free(roots);
+-out:
+- btrfs_put_tree_mod_seq(root->fs_info, &tree_mod_seq_elem);
+- return ret;
+-}
+-
+ static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
+ struct inode *inode,
+ u64 endoff,
+@@ -3328,6 +3293,150 @@ static void clone_update_extent_map(struct inode *inode,
+ &BTRFS_I(inode)->runtime_flags);
+ }
+
++/*
++ * Make sure we do not end up inserting an inline extent into a file that has
++ * already other (non-inline) extents. If a file has an inline extent it can
++ * not have any other extents and the (single) inline extent must start at the
++ * file offset 0. Failing to respect these rules will lead to file corruption,
++ * resulting in EIO errors on read/write operations, hitting BUG_ON's in mm, etc
++ *
++ * We can have extents that have been already written to disk or we can have
++ * dirty ranges still in delalloc, in which case the extent maps and items are
++ * created only when we run delalloc, and the delalloc ranges might fall outside
++ * the range we are currently locking in the inode's io tree. So we check the
++ * inode's i_size because of that (i_size updates are done while holding the
++ * i_mutex, which we are holding here).
++ * We also check to see if the inode has a size not greater than "datal" but has
++ * extents beyond it, due to an fallocate with FALLOC_FL_KEEP_SIZE (and we are
++ * protected against such concurrent fallocate calls by the i_mutex).
++ *
++ * If the file has no extents but a size greater than datal, do not allow the
++ * copy because we would need turn the inline extent into a non-inline one (even
++ * with NO_HOLES enabled). If we find our destination inode only has one inline
++ * extent, just overwrite it with the source inline extent if its size is less
++ * than the source extent's size, or we could copy the source inline extent's
++ * data into the destination inode's inline extent if the later is greater then
++ * the former.
++ */
++static int clone_copy_inline_extent(struct inode *src,
++ struct inode *dst,
++ struct btrfs_trans_handle *trans,
++ struct btrfs_path *path,
++ struct btrfs_key *new_key,
++ const u64 drop_start,
++ const u64 datal,
++ const u64 skip,
++ const u64 size,
++ char *inline_data)
++{
++ struct btrfs_root *root = BTRFS_I(dst)->root;
++ const u64 aligned_end = ALIGN(new_key->offset + datal,
++ root->sectorsize);
++ int ret;
++ struct btrfs_key key;
++
++ if (new_key->offset > 0)
++ return -EOPNOTSUPP;
++
++ key.objectid = btrfs_ino(dst);
++ key.type = BTRFS_EXTENT_DATA_KEY;
++ key.offset = 0;
++ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
++ if (ret < 0) {
++ return ret;
++ } else if (ret > 0) {
++ if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
++ ret = btrfs_next_leaf(root, path);
++ if (ret < 0)
++ return ret;
++ else if (ret > 0)
++ goto copy_inline_extent;
++ }
++ btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
++ if (key.objectid == btrfs_ino(dst) &&
++ key.type == BTRFS_EXTENT_DATA_KEY) {
++ ASSERT(key.offset > 0);
++ return -EOPNOTSUPP;
++ }
++ } else if (i_size_read(dst) <= datal) {
++ struct btrfs_file_extent_item *ei;
++ u64 ext_len;
++
++ /*
++ * If the file size is <= datal, make sure there are no other
++ * extents following (can happen do to an fallocate call with
++ * the flag FALLOC_FL_KEEP_SIZE).
++ */
++ ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
++ struct btrfs_file_extent_item);
++ /*
++ * If it's an inline extent, it can not have other extents
++ * following it.
++ */
++ if (btrfs_file_extent_type(path->nodes[0], ei) ==
++ BTRFS_FILE_EXTENT_INLINE)
++ goto copy_inline_extent;
++
++ ext_len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
++ if (ext_len > aligned_end)
++ return -EOPNOTSUPP;
++
++ ret = btrfs_next_item(root, path);
++ if (ret < 0) {
++ return ret;
++ } else if (ret == 0) {
++ btrfs_item_key_to_cpu(path->nodes[0], &key,
++ path->slots[0]);
++ if (key.objectid == btrfs_ino(dst) &&
++ key.type == BTRFS_EXTENT_DATA_KEY)
++ return -EOPNOTSUPP;
++ }
++ }
++
++copy_inline_extent:
++ /*
++ * We have no extent items, or we have an extent at offset 0 which may
++ * or may not be inlined. All these cases are dealt the same way.
++ */
++ if (i_size_read(dst) > datal) {
++ /*
++ * If the destination inode has an inline extent...
++ * This would require copying the data from the source inline
++ * extent into the beginning of the destination's inline extent.
++ * But this is really complex, both extents can be compressed
++ * or just one of them, which would require decompressing and
++ * re-compressing data (which could increase the new compressed
++ * size, not allowing the compressed data to fit anymore in an
++ * inline extent).
++ * So just don't support this case for now (it should be rare,
++ * we are not really saving space when cloning inline extents).
++ */
++ return -EOPNOTSUPP;
++ }
++
++ btrfs_release_path(path);
++ ret = btrfs_drop_extents(trans, root, dst, drop_start, aligned_end, 1);
++ if (ret)
++ return ret;
++ ret = btrfs_insert_empty_item(trans, root, path, new_key, size);
++ if (ret)
++ return ret;
++
++ if (skip) {
++ const u32 start = btrfs_file_extent_calc_inline_size(0);
++
++ memmove(inline_data + start, inline_data + start + skip, datal);
++ }
++
++ write_extent_buffer(path->nodes[0], inline_data,
++ btrfs_item_ptr_offset(path->nodes[0],
++ path->slots[0]),
++ size);
++ inode_add_bytes(dst, datal);
++
++ return 0;
++}
++
+ /**
+ * btrfs_clone() - clone a range from inode file to another
+ *
+@@ -3352,9 +3461,7 @@ static int btrfs_clone(struct inode *src, struct inode *inode,
+ u32 nritems;
+ int slot;
+ int ret;
+- int no_quota;
+ const u64 len = olen_aligned;
+- u64 last_disko = 0;
+ u64 last_dest_end = destoff;
+
+ ret = -ENOMEM;
+@@ -3400,7 +3507,6 @@ static int btrfs_clone(struct inode *src, struct inode *inode,
+
+ nritems = btrfs_header_nritems(path->nodes[0]);
+process_slot:
+- no_quota = 1;
+ if (path->slots[0] >= nritems) {
+ ret = btrfs_next_leaf(BTRFS_I(src)->root, path);
+ if (ret < 0)
+@@ -3552,35 +3658,13 @@ process_slot:
+ btrfs_set_file_extent_num_bytes(leaf, extent,
+ datal);
+
+- /*
+- * We need to look up the roots that point at
+- * this bytenr and see if the new root does. If
+- * it does not we need to make sure we update
+- * quotas appropriately.
+- */
+- if (disko && root != BTRFS_I(src)->root &&
+- disko != last_disko) {
+- no_quota = check_ref(trans, root,
+- disko);
+- if (no_quota < 0) {
+- btrfs_abort_transaction(trans,
+- root,
+- ret);
+- btrfs_end_transaction(trans,
+- root);
+- ret = no_quota;
+- goto out;
+- }
+- }
+-
+ if (disko) {
+ inode_add_bytes(inode, datal);
+ ret = btrfs_inc_extent_ref(trans, root,
+ disko, diskl, 0,
+ root->root_key.objectid,
+ btrfs_ino(inode),
+- new_key.offset - datao,
+- no_quota);
++ new_key.offset - datao);
+ if (ret) {
+ btrfs_abort_transaction(trans,
+ root,
+@@ -3594,21 +3678,6 @@ process_slot:
+ } else if (type == BTRFS_FILE_EXTENT_INLINE) {
+ u64 skip = 0;
+ u64 trim = 0;
+- u64 aligned_end = 0;
+-
+- /*
+- * Don't copy an inline extent into an offset
+- * greater than zero. Having an inline extent
+- * at such an offset results in chaos as btrfs
+- * isn't prepared for such cases. Just skip
+- * this case for the same reasons as commented
+- * at btrfs_ioctl_clone().
+- */
+- if (last_dest_end > 0) {
+- ret = -EOPNOTSUPP;
+- btrfs_end_transaction(trans, root);
+- goto out;
+- }
+
+ if (off > key.offset) {
+ skip = off - key.offset;
+@@ -3626,42 +3695,22 @@ process_slot:
+ size -= skip + trim;
+ datal -= skip + trim;
+
+- aligned_end = ALIGN(new_key.offset + datal,
+- root->sectorsize);
+- ret = btrfs_drop_extents(trans, root, inode,
+- drop_start,
+- aligned_end,
+- 1);
++ ret = clone_copy_inline_extent(src, inode,
++ trans, path,
++ &new_key,
++ drop_start,
++ datal,
++ skip, size, buf);
+ if (ret) {
+ if (ret != -EOPNOTSUPP)
+ btrfs_abort_transaction(trans,
+- root, ret);
+- btrfs_end_transaction(trans, root);
+- goto out;
+- }
+-
+- ret = btrfs_insert_empty_item(trans, root, path,
+- &new_key, size);
+- if (ret) {
+- btrfs_abort_transaction(trans, root,
+- ret);
++ root,
++ ret);
+ btrfs_end_transaction(trans, root);
+ goto out;
+ }
+-
+- if (skip) {
+- u32 start =
+- btrfs_file_extent_calc_inline_size(0);
+- memmove(buf+start, buf+start+skip,
+- datal);
+- }
+-
+ leaf = path->nodes[0];
+ slot = path->slots[0];
+- write_extent_buffer(leaf, buf,
+- btrfs_item_ptr_offset(leaf, slot),
+- size);
+- inode_add_bytes(inode, datal);
+ }
+
+ /* If we have an implicit hole (NO_HOLES feature). */
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
+index 303babeef505..ab507e3d536b 100644
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -1716,7 +1716,7 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
+ ret = btrfs_inc_extent_ref(trans, root, new_bytenr,
+ num_bytes, parent,
+ btrfs_header_owner(leaf),
+- key.objectid, key.offset, 1);
++ key.objectid, key.offset);
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ break;
+@@ -1724,7 +1724,7 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
+
+ ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
+ parent, btrfs_header_owner(leaf),
+- key.objectid, key.offset, 1);
++ key.objectid, key.offset);
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ break;
+@@ -1900,23 +1900,21 @@ again:
+
+ ret = btrfs_inc_extent_ref(trans, src, old_bytenr, blocksize,
+ path->nodes[level]->start,
+- src->root_key.objectid, level - 1, 0,
+- 1);
++ src->root_key.objectid, level - 1, 0);
+ BUG_ON(ret);
+ ret = btrfs_inc_extent_ref(trans, dest, new_bytenr, blocksize,
+ 0, dest->root_key.objectid, level - 1,
+- 0, 1);
++ 0);
+ BUG_ON(ret);
+
+ ret = btrfs_free_extent(trans, src, new_bytenr, blocksize,
+ path->nodes[level]->start,
+- src->root_key.objectid, level - 1, 0,
+- 1);
++ src->root_key.objectid, level - 1, 0);
+ BUG_ON(ret);
+
+ ret = btrfs_free_extent(trans, dest, old_bytenr, blocksize,
+ 0, dest->root_key.objectid, level - 1,
+- 0, 1);
++ 0);
+ BUG_ON(ret);
+
+ btrfs_unlock_up_safe(path, 0);
+@@ -2745,7 +2743,7 @@ static int do_relocation(struct btrfs_trans_handle *trans,
+ node->eb->start, blocksize,
+ upper->eb->start,
+ btrfs_header_owner(upper->eb),
+- node->level, 0, 1);
++ node->level, 0);
+ BUG_ON(ret);
+
+ ret = btrfs_drop_subtree(trans, root, eb, upper->eb);
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index a739b825bdd3..23bb2e4b911b 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -2353,8 +2353,14 @@ static int send_subvol_begin(struct send_ctx *sctx)
+ }
+
+ TLV_PUT_STRING(sctx, BTRFS_SEND_A_PATH, name, namelen);
+- TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
+- sctx->send_root->root_item.uuid);
++
++ if (!btrfs_is_empty_uuid(sctx->send_root->root_item.received_uuid))
++ TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
++ sctx->send_root->root_item.received_uuid);
++ else
++ TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
++ sctx->send_root->root_item.uuid);
++
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID,
+ le64_to_cpu(sctx->send_root->root_item.ctransid));
+ if (parent_root) {
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 1bbaace73383..6f8af2de5912 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -691,7 +691,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
+ ret = btrfs_inc_extent_ref(trans, root,
+ ins.objectid, ins.offset,
+ 0, root->root_key.objectid,
+- key->objectid, offset, 0);
++ key->objectid, offset);
+ if (ret)
+ goto out;
+ } else {
+diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
+index 6f518c90e1c1..1fcd7b6e7564 100644
+--- a/fs/btrfs/xattr.c
++++ b/fs/btrfs/xattr.c
+@@ -313,8 +313,10 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
+ /* check to make sure this item is what we want */
+ if (found_key.objectid != key.objectid)
+ break;
+- if (found_key.type != BTRFS_XATTR_ITEM_KEY)
++ if (found_key.type > BTRFS_XATTR_ITEM_KEY)
+ break;
++ if (found_key.type < BTRFS_XATTR_ITEM_KEY)
++ goto next;
+
+ di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
+ if (verify_dir_item(root, leaf, di))
1541 +diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
1542 +index 51cb02da75d9..fe2c982764e7 100644
1543 +--- a/fs/ceph/mds_client.c
1544 ++++ b/fs/ceph/mds_client.c
1545 +@@ -1935,7 +1935,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
1546 +
1547 + len = sizeof(*head) +
1548 + pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)) +
1549 +- sizeof(struct timespec);
1550 ++ sizeof(struct ceph_timespec);
1551 +
1552 + /* calculate (max) length for cap releases */
1553 + len += sizeof(struct ceph_mds_request_release) *
1554 +diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
1555 +index c711be8d6a3c..9c8d23316da1 100644
1556 +--- a/fs/debugfs/inode.c
1557 ++++ b/fs/debugfs/inode.c
1558 +@@ -271,8 +271,12 @@ static struct dentry *start_creating(const char *name, struct dentry *parent)
1559 + dput(dentry);
1560 + dentry = ERR_PTR(-EEXIST);
1561 + }
1562 +- if (IS_ERR(dentry))
1563 ++
1564 ++ if (IS_ERR(dentry)) {
1565 + mutex_unlock(&d_inode(parent)->i_mutex);
1566 ++ simple_release_fs(&debugfs_mount, &debugfs_mount_count);
1567 ++ }
1568 ++
1569 + return dentry;
1570 + }
1571 +
1572 +diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
1573 +index 45731558138c..2fab243a4c9e 100644
1574 +--- a/fs/ext4/crypto.c
1575 ++++ b/fs/ext4/crypto.c
1576 +@@ -411,7 +411,13 @@ int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
1577 + ext4_lblk_t lblk = ex->ee_block;
1578 + ext4_fsblk_t pblk = ext4_ext_pblock(ex);
1579 + unsigned int len = ext4_ext_get_actual_len(ex);
1580 +- int err = 0;
1581 ++ int ret, err = 0;
1582 ++
1583 ++#if 0
1584 ++ ext4_msg(inode->i_sb, KERN_CRIT,
1585 ++ "ext4_encrypted_zeroout ino %lu lblk %u len %u",
1586 ++ (unsigned long) inode->i_ino, lblk, len);
1587 ++#endif
1588 +
1589 + BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE);
1590 +
1591 +@@ -437,17 +443,26 @@ int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
1592 + goto errout;
1593 + }
1594 + bio->bi_bdev = inode->i_sb->s_bdev;
1595 +- bio->bi_iter.bi_sector = pblk;
1596 +- err = bio_add_page(bio, ciphertext_page,
1597 ++ bio->bi_iter.bi_sector =
1598 ++ pblk << (inode->i_sb->s_blocksize_bits - 9);
1599 ++ ret = bio_add_page(bio, ciphertext_page,
1600 + inode->i_sb->s_blocksize, 0);
1601 +- if (err) {
1602 ++ if (ret != inode->i_sb->s_blocksize) {
1603 ++ /* should never happen! */
1604 ++ ext4_msg(inode->i_sb, KERN_ERR,
1605 ++ "bio_add_page failed: %d", ret);
1606 ++ WARN_ON(1);
1607 + bio_put(bio);
1608 ++ err = -EIO;
1609 + goto errout;
1610 + }
1611 + err = submit_bio_wait(WRITE, bio);
1612 ++ if ((err == 0) && bio->bi_error)
1613 ++ err = -EIO;
1614 + bio_put(bio);
1615 + if (err)
1616 + goto errout;
1617 ++ lblk++; pblk++;
1618 + }
1619 + err = 0;
1620 + errout:
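The ext4_encrypted_zeroout() fix above corrects two things: bi_iter.bi_sector is expressed in 512-byte sectors, so the filesystem block number pblk must be shifted by (s_blocksize_bits - 9), and bio_add_page() returns the number of bytes attached, so its result is compared against the block size instead of being treated as an error code. A minimal standalone sketch of the sector conversion, assuming 4K blocks (constants are illustrative, not kernel API):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int blocksize_bits = 12;   /* 4096-byte fs blocks */
	uint64_t pblk = 1000;               /* filesystem block number */
	uint64_t sector = pblk << (blocksize_bits - 9);

	/* block 1000 -> sector 8000: one 4K block spans eight 512B sectors */
	printf("block %llu -> sector %llu\n",
	       (unsigned long long)pblk, (unsigned long long)sector);
	return 0;
}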
1621 +diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
1622 +index d41843181818..e770c1ee4613 100644
1623 +--- a/fs/ext4/ext4_jbd2.c
1624 ++++ b/fs/ext4/ext4_jbd2.c
1625 +@@ -88,13 +88,13 @@ int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle)
1626 + return 0;
1627 + }
1628 +
1629 ++ err = handle->h_err;
1630 + if (!handle->h_transaction) {
1631 +- err = jbd2_journal_stop(handle);
1632 +- return handle->h_err ? handle->h_err : err;
1633 ++ rc = jbd2_journal_stop(handle);
1634 ++ return err ? err : rc;
1635 + }
1636 +
1637 + sb = handle->h_transaction->t_journal->j_private;
1638 +- err = handle->h_err;
1639 + rc = jbd2_journal_stop(handle);
1640 +
1641 + if (!err)
1642 +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
1643 +index 2553aa8b608d..7f486e350d15 100644
1644 +--- a/fs/ext4/extents.c
1645 ++++ b/fs/ext4/extents.c
1646 +@@ -3558,6 +3558,9 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
1647 + max_zeroout = sbi->s_extent_max_zeroout_kb >>
1648 + (inode->i_sb->s_blocksize_bits - 10);
1649 +
1650 ++ if (ext4_encrypted_inode(inode))
1651 ++ max_zeroout = 0;
1652 ++
1653 + /* If extent is less than s_max_zeroout_kb, zeroout directly */
1654 + if (max_zeroout && (ee_len <= max_zeroout)) {
1655 + err = ext4_ext_zeroout(inode, ex);
1656 +diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
1657 +index 84ba4d2b3a35..17fbe3882b8e 100644
1658 +--- a/fs/ext4/page-io.c
1659 ++++ b/fs/ext4/page-io.c
1660 +@@ -425,6 +425,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
1661 + struct buffer_head *bh, *head;
1662 + int ret = 0;
1663 + int nr_submitted = 0;
1664 ++ int nr_to_submit = 0;
1665 +
1666 + blocksize = 1 << inode->i_blkbits;
1667 +
1668 +@@ -477,11 +478,13 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
1669 + unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
1670 + }
1671 + set_buffer_async_write(bh);
1672 ++ nr_to_submit++;
1673 + } while ((bh = bh->b_this_page) != head);
1674 +
1675 + bh = head = page_buffers(page);
1676 +
1677 +- if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
1678 ++ if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode) &&
1679 ++ nr_to_submit) {
1680 + data_page = ext4_encrypt(inode, page);
1681 + if (IS_ERR(data_page)) {
1682 + ret = PTR_ERR(data_page);
1683 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
1684 +index a63c7b0a10cf..df84bd256c9f 100644
1685 +--- a/fs/ext4/super.c
1686 ++++ b/fs/ext4/super.c
1687 +@@ -394,9 +394,13 @@ static void ext4_handle_error(struct super_block *sb)
1688 + smp_wmb();
1689 + sb->s_flags |= MS_RDONLY;
1690 + }
1691 +- if (test_opt(sb, ERRORS_PANIC))
1692 ++ if (test_opt(sb, ERRORS_PANIC)) {
1693 ++ if (EXT4_SB(sb)->s_journal &&
1694 ++ !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
1695 ++ return;
1696 + panic("EXT4-fs (device %s): panic forced after error\n",
1697 + sb->s_id);
1698 ++ }
1699 + }
1700 +
1701 + #define ext4_error_ratelimit(sb) \
1702 +@@ -585,8 +589,12 @@ void __ext4_abort(struct super_block *sb, const char *function,
1703 + jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
1704 + save_error_info(sb, function, line);
1705 + }
1706 +- if (test_opt(sb, ERRORS_PANIC))
1707 ++ if (test_opt(sb, ERRORS_PANIC)) {
1708 ++ if (EXT4_SB(sb)->s_journal &&
1709 ++ !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
1710 ++ return;
1711 + panic("EXT4-fs panic from previous error\n");
1712 ++ }
1713 + }
1714 +
1715 + void __ext4_msg(struct super_block *sb,
1716 +diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
1717 +index 8270fe9e3641..37023d0bdae4 100644
1718 +--- a/fs/jbd2/journal.c
1719 ++++ b/fs/jbd2/journal.c
1720 +@@ -2071,8 +2071,12 @@ static void __journal_abort_soft (journal_t *journal, int errno)
1721 +
1722 + __jbd2_journal_abort_hard(journal);
1723 +
1724 +- if (errno)
1725 ++ if (errno) {
1726 + jbd2_journal_update_sb_errno(journal);
1727 ++ write_lock(&journal->j_state_lock);
1728 ++ journal->j_flags |= JBD2_REC_ERR;
1729 ++ write_unlock(&journal->j_state_lock);
1730 ++ }
1731 + }
1732 +
1733 + /**
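The paired ext4/super.c and jbd2/journal.c hunks above fix errors=panic ordering: the panic is now skipped until the journal has written the errno into its superblock and set JBD2_REC_ERR, so the error record survives the forced reboot. A condensed sketch of the added gate (the flag value comes from the jbd2.h hunk below; the struct is illustrative):

#include <stdbool.h>

#define JBD2_REC_ERR 0x080	/* errno recorded in the journal superblock */

struct journal_sketch { unsigned int j_flags; };

/* Panic only once the on-disk journal carries the error record */
bool may_panic(const struct journal_sketch *j)
{
	return !j || (j->j_flags & JBD2_REC_ERR);
}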
1734 +diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
1735 +index 326d9e10d833..ffdf9b9e88ab 100644
1736 +--- a/fs/nfs/inode.c
1737 ++++ b/fs/nfs/inode.c
1738 +@@ -1824,7 +1824,11 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1739 + if ((long)fattr->gencount - (long)nfsi->attr_gencount > 0)
1740 + nfsi->attr_gencount = fattr->gencount;
1741 + }
1742 +- invalid &= ~NFS_INO_INVALID_ATTR;
1743 ++
1744 ++ /* Don't declare attrcache up to date if there were no attrs! */
1745 ++ if (fattr->valid != 0)
1746 ++ invalid &= ~NFS_INO_INVALID_ATTR;
1747 ++
1748 + /* Don't invalidate the data if we were to blame */
1749 + if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)
1750 + || S_ISLNK(inode->i_mode)))
1751 +diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
1752 +index 223bedda64ae..10410e8b5853 100644
1753 +--- a/fs/nfs/nfs4client.c
1754 ++++ b/fs/nfs/nfs4client.c
1755 +@@ -33,7 +33,7 @@ static int nfs_get_cb_ident_idr(struct nfs_client *clp, int minorversion)
1756 + return ret;
1757 + idr_preload(GFP_KERNEL);
1758 + spin_lock(&nn->nfs_client_lock);
1759 +- ret = idr_alloc(&nn->cb_ident_idr, clp, 0, 0, GFP_NOWAIT);
1760 ++ ret = idr_alloc(&nn->cb_ident_idr, clp, 1, 0, GFP_NOWAIT);
1761 + if (ret >= 0)
1762 + clp->cl_cb_ident = ret;
1763 + spin_unlock(&nn->nfs_client_lock);
1764 +diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
1765 +index 8abe27165ad0..abf5caea20c9 100644
1766 +--- a/fs/nfs/pnfs.c
1767 ++++ b/fs/nfs/pnfs.c
1768 +@@ -872,33 +872,38 @@ send_layoutget(struct pnfs_layout_hdr *lo,
1769 +
1770 + dprintk("--> %s\n", __func__);
1771 +
1772 +- lgp = kzalloc(sizeof(*lgp), gfp_flags);
1773 +- if (lgp == NULL)
1774 +- return NULL;
1775 ++ /*
1776 ++ * Synchronously retrieve layout information from server and
1777 ++ * store in lseg. If we race with a concurrent seqid morphing
1778 ++ * op, then re-send the LAYOUTGET.
1779 ++ */
1780 ++ do {
1781 ++ lgp = kzalloc(sizeof(*lgp), gfp_flags);
1782 ++ if (lgp == NULL)
1783 ++ return NULL;
1784 ++
1785 ++ i_size = i_size_read(ino);
1786 ++
1787 ++ lgp->args.minlength = PAGE_CACHE_SIZE;
1788 ++ if (lgp->args.minlength > range->length)
1789 ++ lgp->args.minlength = range->length;
1790 ++ if (range->iomode == IOMODE_READ) {
1791 ++ if (range->offset >= i_size)
1792 ++ lgp->args.minlength = 0;
1793 ++ else if (i_size - range->offset < lgp->args.minlength)
1794 ++ lgp->args.minlength = i_size - range->offset;
1795 ++ }
1796 ++ lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
1797 ++ lgp->args.range = *range;
1798 ++ lgp->args.type = server->pnfs_curr_ld->id;
1799 ++ lgp->args.inode = ino;
1800 ++ lgp->args.ctx = get_nfs_open_context(ctx);
1801 ++ lgp->gfp_flags = gfp_flags;
1802 ++ lgp->cred = lo->plh_lc_cred;
1803 +
1804 +- i_size = i_size_read(ino);
1805 ++ lseg = nfs4_proc_layoutget(lgp, gfp_flags);
1806 ++ } while (lseg == ERR_PTR(-EAGAIN));
1807 +
1808 +- lgp->args.minlength = PAGE_CACHE_SIZE;
1809 +- if (lgp->args.minlength > range->length)
1810 +- lgp->args.minlength = range->length;
1811 +- if (range->iomode == IOMODE_READ) {
1812 +- if (range->offset >= i_size)
1813 +- lgp->args.minlength = 0;
1814 +- else if (i_size - range->offset < lgp->args.minlength)
1815 +- lgp->args.minlength = i_size - range->offset;
1816 +- }
1817 +- lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
1818 +- lgp->args.range = *range;
1819 +- lgp->args.type = server->pnfs_curr_ld->id;
1820 +- lgp->args.inode = ino;
1821 +- lgp->args.ctx = get_nfs_open_context(ctx);
1822 +- lgp->gfp_flags = gfp_flags;
1823 +- lgp->cred = lo->plh_lc_cred;
1824 +-
1825 +- /* Synchronously retrieve layout information from server and
1826 +- * store in lseg.
1827 +- */
1828 +- lseg = nfs4_proc_layoutget(lgp, gfp_flags);
1829 + if (IS_ERR(lseg)) {
1830 + switch (PTR_ERR(lseg)) {
1831 + case -ENOMEM:
1832 +@@ -1687,6 +1692,7 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
1833 + /* existing state ID, make sure the sequence number matches. */
1834 + if (pnfs_layout_stateid_blocked(lo, &res->stateid)) {
1835 + dprintk("%s forget reply due to sequence\n", __func__);
1836 ++ status = -EAGAIN;
1837 + goto out_forget_reply;
1838 + }
1839 + pnfs_set_layout_stateid(lo, &res->stateid, false);
1840 +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
1841 +index 0f1d5691b795..0dea0c254ddf 100644
1842 +--- a/fs/nfsd/nfs4state.c
1843 ++++ b/fs/nfsd/nfs4state.c
1844 +@@ -765,16 +765,68 @@ void nfs4_unhash_stid(struct nfs4_stid *s)
1845 + s->sc_type = 0;
1846 + }
1847 +
1848 +-static void
1849 ++/**
1850 ++ * nfs4_get_existing_delegation - Discover if this delegation already exists
1851 ++ * @clp: a pointer to the nfs4_client we're granting a delegation to
1852 ++ * @fp: a pointer to the nfs4_file we're granting a delegation on
1853 ++ *
1854 ++ * Return:
1855 ++ * On success: NULL if an existing delegation was not found.
1856 ++ *
1857 ++ * On error: -EAGAIN if one was previously granted to this nfs4_client
1858 ++ * for this nfs4_file.
1859 ++ *
1860 ++ */
1861 ++
1862 ++static int
1863 ++nfs4_get_existing_delegation(struct nfs4_client *clp, struct nfs4_file *fp)
1864 ++{
1865 ++ struct nfs4_delegation *searchdp = NULL;
1866 ++ struct nfs4_client *searchclp = NULL;
1867 ++
1868 ++ lockdep_assert_held(&state_lock);
1869 ++ lockdep_assert_held(&fp->fi_lock);
1870 ++
1871 ++ list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
1872 ++ searchclp = searchdp->dl_stid.sc_client;
1873 ++ if (clp == searchclp) {
1874 ++ return -EAGAIN;
1875 ++ }
1876 ++ }
1877 ++ return 0;
1878 ++}
1879 ++
1880 ++/**
1881 ++ * hash_delegation_locked - Add a delegation to the appropriate lists
1882 ++ * @dp: a pointer to the nfs4_delegation we are adding.
1883 ++ * @fp: a pointer to the nfs4_file we're granting a delegation on
1884 ++ *
1885 ++ * Return:
1886 ++ * On success: NULL if the delegation was successfully hashed.
1887 ++ *
1888 ++ * On error: -EAGAIN if one was previously granted to this
1889 ++ * nfs4_client for this nfs4_file. Delegation is not hashed.
1890 ++ *
1891 ++ */
1892 ++
1893 ++static int
1894 + hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
1895 + {
1896 ++ int status;
1897 ++ struct nfs4_client *clp = dp->dl_stid.sc_client;
1898 ++
1899 + lockdep_assert_held(&state_lock);
1900 + lockdep_assert_held(&fp->fi_lock);
1901 +
1902 ++ status = nfs4_get_existing_delegation(clp, fp);
1903 ++ if (status)
1904 ++ return status;
1905 ++ ++fp->fi_delegees;
1906 + atomic_inc(&dp->dl_stid.sc_count);
1907 + dp->dl_stid.sc_type = NFS4_DELEG_STID;
1908 + list_add(&dp->dl_perfile, &fp->fi_delegations);
1909 +- list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
1910 ++ list_add(&dp->dl_perclnt, &clp->cl_delegations);
1911 ++ return 0;
1912 + }
1913 +
1914 + static bool
1915 +@@ -3360,6 +3412,7 @@ static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp,
1916 + stp->st_access_bmap = 0;
1917 + stp->st_deny_bmap = 0;
1918 + stp->st_openstp = NULL;
1919 ++ init_rwsem(&stp->st_rwsem);
1920 + spin_lock(&oo->oo_owner.so_client->cl_lock);
1921 + list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
1922 + spin_lock(&fp->fi_lock);
1923 +@@ -3945,6 +3998,18 @@ static struct file_lock *nfs4_alloc_init_lease(struct nfs4_file *fp, int flag)
1924 + return fl;
1925 + }
1926 +
1927 ++/**
1928 ++ * nfs4_setlease - Obtain a delegation by requesting lease from vfs layer
1929 ++ * @dp: a pointer to the nfs4_delegation we're adding.
1930 ++ *
1931 ++ * Return:
1932 ++ * On success: Return code will be 0 on success.
1933 ++ *
1934 ++ * On error: -EAGAIN if there was an existing delegation.
1935 ++ * nonzero if there is an error in other cases.
1936 ++ *
1937 ++ */
1938 ++
1939 + static int nfs4_setlease(struct nfs4_delegation *dp)
1940 + {
1941 + struct nfs4_file *fp = dp->dl_stid.sc_file;
1942 +@@ -3976,16 +4041,19 @@ static int nfs4_setlease(struct nfs4_delegation *dp)
1943 + goto out_unlock;
1944 + /* Race breaker */
1945 + if (fp->fi_deleg_file) {
1946 +- status = 0;
1947 +- ++fp->fi_delegees;
1948 +- hash_delegation_locked(dp, fp);
1949 ++ status = hash_delegation_locked(dp, fp);
1950 + goto out_unlock;
1951 + }
1952 + fp->fi_deleg_file = filp;
1953 +- fp->fi_delegees = 1;
1954 +- hash_delegation_locked(dp, fp);
1955 ++ fp->fi_delegees = 0;
1956 ++ status = hash_delegation_locked(dp, fp);
1957 + spin_unlock(&fp->fi_lock);
1958 + spin_unlock(&state_lock);
1959 ++ if (status) {
1960 ++ /* Should never happen, this is a new fi_deleg_file */
1961 ++ WARN_ON_ONCE(1);
1962 ++ goto out_fput;
1963 ++ }
1964 + return 0;
1965 + out_unlock:
1966 + spin_unlock(&fp->fi_lock);
1967 +@@ -4005,6 +4073,15 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
1968 + if (fp->fi_had_conflict)
1969 + return ERR_PTR(-EAGAIN);
1970 +
1971 ++ spin_lock(&state_lock);
1972 ++ spin_lock(&fp->fi_lock);
1973 ++ status = nfs4_get_existing_delegation(clp, fp);
1974 ++ spin_unlock(&fp->fi_lock);
1975 ++ spin_unlock(&state_lock);
1976 ++
1977 ++ if (status)
1978 ++ return ERR_PTR(status);
1979 ++
1980 + dp = alloc_init_deleg(clp, fh, odstate);
1981 + if (!dp)
1982 + return ERR_PTR(-ENOMEM);
1983 +@@ -4023,9 +4100,7 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
1984 + status = -EAGAIN;
1985 + goto out_unlock;
1986 + }
1987 +- ++fp->fi_delegees;
1988 +- hash_delegation_locked(dp, fp);
1989 +- status = 0;
1990 ++ status = hash_delegation_locked(dp, fp);
1991 + out_unlock:
1992 + spin_unlock(&fp->fi_lock);
1993 + spin_unlock(&state_lock);
1994 +@@ -4187,15 +4262,20 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
1995 + */
1996 + if (stp) {
1997 + /* Stateid was found, this is an OPEN upgrade */
1998 ++ down_read(&stp->st_rwsem);
1999 + status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
2000 +- if (status)
2001 ++ if (status) {
2002 ++ up_read(&stp->st_rwsem);
2003 + goto out;
2004 ++ }
2005 + } else {
2006 + stp = open->op_stp;
2007 + open->op_stp = NULL;
2008 + init_open_stateid(stp, fp, open);
2009 ++ down_read(&stp->st_rwsem);
2010 + status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
2011 + if (status) {
2012 ++ up_read(&stp->st_rwsem);
2013 + release_open_stateid(stp);
2014 + goto out;
2015 + }
2016 +@@ -4207,6 +4287,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
2017 + }
2018 + update_stateid(&stp->st_stid.sc_stateid);
2019 + memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
2020 ++ up_read(&stp->st_rwsem);
2021 +
2022 + if (nfsd4_has_session(&resp->cstate)) {
2023 + if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
2024 +@@ -4819,10 +4900,13 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_
2025 + * revoked delegations are kept only for free_stateid.
2026 + */
2027 + return nfserr_bad_stateid;
2028 ++ down_write(&stp->st_rwsem);
2029 + status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
2030 +- if (status)
2031 +- return status;
2032 +- return nfs4_check_fh(current_fh, &stp->st_stid);
2033 ++ if (status == nfs_ok)
2034 ++ status = nfs4_check_fh(current_fh, &stp->st_stid);
2035 ++ if (status != nfs_ok)
2036 ++ up_write(&stp->st_rwsem);
2037 ++ return status;
2038 + }
2039 +
2040 + /*
2041 +@@ -4869,6 +4953,7 @@ static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cs
2042 + return status;
2043 + oo = openowner(stp->st_stateowner);
2044 + if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
2045 ++ up_write(&stp->st_rwsem);
2046 + nfs4_put_stid(&stp->st_stid);
2047 + return nfserr_bad_stateid;
2048 + }
2049 +@@ -4899,11 +4984,14 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2050 + goto out;
2051 + oo = openowner(stp->st_stateowner);
2052 + status = nfserr_bad_stateid;
2053 +- if (oo->oo_flags & NFS4_OO_CONFIRMED)
2054 ++ if (oo->oo_flags & NFS4_OO_CONFIRMED) {
2055 ++ up_write(&stp->st_rwsem);
2056 + goto put_stateid;
2057 ++ }
2058 + oo->oo_flags |= NFS4_OO_CONFIRMED;
2059 + update_stateid(&stp->st_stid.sc_stateid);
2060 + memcpy(&oc->oc_resp_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
2061 ++ up_write(&stp->st_rwsem);
2062 + dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
2063 + __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
2064 +
2065 +@@ -4982,6 +5070,7 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp,
2066 + memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
2067 + status = nfs_ok;
2068 + put_stateid:
2069 ++ up_write(&stp->st_rwsem);
2070 + nfs4_put_stid(&stp->st_stid);
2071 + out:
2072 + nfsd4_bump_seqid(cstate, status);
2073 +@@ -5035,6 +5124,7 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2074 + goto out;
2075 + update_stateid(&stp->st_stid.sc_stateid);
2076 + memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
2077 ++ up_write(&stp->st_rwsem);
2078 +
2079 + nfsd4_close_open_stateid(stp);
2080 +
2081 +@@ -5260,6 +5350,7 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
2082 + stp->st_access_bmap = 0;
2083 + stp->st_deny_bmap = open_stp->st_deny_bmap;
2084 + stp->st_openstp = open_stp;
2085 ++ init_rwsem(&stp->st_rwsem);
2086 + list_add(&stp->st_locks, &open_stp->st_locks);
2087 + list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
2088 + spin_lock(&fp->fi_lock);
2089 +@@ -5428,6 +5519,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2090 + &open_stp, nn);
2091 + if (status)
2092 + goto out;
2093 ++ up_write(&open_stp->st_rwsem);
2094 + open_sop = openowner(open_stp->st_stateowner);
2095 + status = nfserr_bad_stateid;
2096 + if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
2097 +@@ -5435,6 +5527,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2098 + goto out;
2099 + status = lookup_or_create_lock_state(cstate, open_stp, lock,
2100 + &lock_stp, &new);
2101 ++ if (status == nfs_ok)
2102 ++ down_write(&lock_stp->st_rwsem);
2103 + } else {
2104 + status = nfs4_preprocess_seqid_op(cstate,
2105 + lock->lk_old_lock_seqid,
2106 +@@ -5540,6 +5634,8 @@ out:
2107 + seqid_mutating_err(ntohl(status)))
2108 + lock_sop->lo_owner.so_seqid++;
2109 +
2110 ++ up_write(&lock_stp->st_rwsem);
2111 ++
2112 + /*
2113 + * If this is a new, never-before-used stateid, and we are
2114 + * returning an error, then just go ahead and release it.
2115 +@@ -5709,6 +5805,7 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2116 + fput:
2117 + fput(filp);
2118 + put_stateid:
2119 ++ up_write(&stp->st_rwsem);
2120 + nfs4_put_stid(&stp->st_stid);
2121 + out:
2122 + nfsd4_bump_seqid(cstate, status);
2123 +diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
2124 +index 583ffc13cae2..31bde12feefe 100644
2125 +--- a/fs/nfsd/state.h
2126 ++++ b/fs/nfsd/state.h
2127 +@@ -534,15 +534,16 @@ struct nfs4_file {
2128 + * Better suggestions welcome.
2129 + */
2130 + struct nfs4_ol_stateid {
2131 +- struct nfs4_stid st_stid; /* must be first field */
2132 +- struct list_head st_perfile;
2133 +- struct list_head st_perstateowner;
2134 +- struct list_head st_locks;
2135 +- struct nfs4_stateowner * st_stateowner;
2136 +- struct nfs4_clnt_odstate * st_clnt_odstate;
2137 +- unsigned char st_access_bmap;
2138 +- unsigned char st_deny_bmap;
2139 +- struct nfs4_ol_stateid * st_openstp;
2140 ++ struct nfs4_stid st_stid;
2141 ++ struct list_head st_perfile;
2142 ++ struct list_head st_perstateowner;
2143 ++ struct list_head st_locks;
2144 ++ struct nfs4_stateowner *st_stateowner;
2145 ++ struct nfs4_clnt_odstate *st_clnt_odstate;
2146 ++ unsigned char st_access_bmap;
2147 ++ unsigned char st_deny_bmap;
2148 ++ struct nfs4_ol_stateid *st_openstp;
2149 ++ struct rw_semaphore st_rwsem;
2150 + };
2151 +
2152 + static inline struct nfs4_ol_stateid *openlockstateid(struct nfs4_stid *s)
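The nfsd hunks above serialize stateid updates with the new st_rwsem: OPEN installs or upgrades a stateid while holding the shared side, and the seqid-mutating operations (OPEN_CONFIRM, OPEN_DOWNGRADE, CLOSE, LOCK, LOCKU) hold the exclusive side across the stateid bump, so an OPEN can no longer race a concurrent morphing op on the same stateid. A rough userspace model of that locking discipline, using POSIX rwlocks as a stand-in for the kernel rw_semaphore (names illustrative):

#include <pthread.h>

struct ol_stateid_sketch {
	unsigned int seqid;
	pthread_rwlock_t rwsem;		/* stands in for st_rwsem */
};

/* OPEN path: shared side, like down_read() in nfsd4_process_open2() */
void open_path(struct ol_stateid_sketch *stp)
{
	pthread_rwlock_rdlock(&stp->rwsem);
	/* ... nfs4_get_vfs_file() / nfs4_upgrade_open() analogue ... */
	pthread_rwlock_unlock(&stp->rwsem);
}

/* Seqid-mutating op: exclusive side, like down_write() */
void seqid_mutating_op(struct ol_stateid_sketch *stp)
{
	pthread_rwlock_wrlock(&stp->rwsem);
	stp->seqid++;			/* update_stateid() analogue */
	pthread_rwlock_unlock(&stp->rwsem);
}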
2153 +diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
2154 +index b7dfac226b1e..12bfa9ca5583 100644
2155 +--- a/fs/ocfs2/namei.c
2156 ++++ b/fs/ocfs2/namei.c
2157 +@@ -374,6 +374,8 @@ static int ocfs2_mknod(struct inode *dir,
2158 + mlog_errno(status);
2159 + goto leave;
2160 + }
2161 ++ /* update inode->i_mode after mask with "umask". */
2162 ++ inode->i_mode = mode;
2163 +
2164 + handle = ocfs2_start_trans(osb, ocfs2_mknod_credits(osb->sb,
2165 + S_ISDIR(mode),
2166 +diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
2167 +index f1f32af6d9b9..3e4ff3f1d314 100644
2168 +--- a/include/linux/ipv6.h
2169 ++++ b/include/linux/ipv6.h
2170 +@@ -227,7 +227,7 @@ struct ipv6_pinfo {
2171 + struct ipv6_ac_socklist *ipv6_ac_list;
2172 + struct ipv6_fl_socklist __rcu *ipv6_fl_list;
2173 +
2174 +- struct ipv6_txoptions *opt;
2175 ++ struct ipv6_txoptions __rcu *opt;
2176 + struct sk_buff *pktoptions;
2177 + struct sk_buff *rxpmtu;
2178 + struct inet6_cork cork;
2179 +diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
2180 +index df07e78487d5..1abeb820a630 100644
2181 +--- a/include/linux/jbd2.h
2182 ++++ b/include/linux/jbd2.h
2183 +@@ -1046,6 +1046,7 @@ struct journal_s
2184 + #define JBD2_ABORT_ON_SYNCDATA_ERR 0x040 /* Abort the journal on file
2185 + * data write error in ordered
2186 + * mode */
2187 ++#define JBD2_REC_ERR 0x080 /* The errno in the sb has been recorded */
2188 +
2189 + /*
2190 + * Function declarations for the journaling transaction and buffer
2191 +diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
2192 +index dd2097455a2e..1565324eb620 100644
2193 +--- a/include/linux/mlx5/mlx5_ifc.h
2194 ++++ b/include/linux/mlx5/mlx5_ifc.h
2195 +@@ -453,26 +453,28 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
2196 + u8 lro_cap[0x1];
2197 + u8 lro_psh_flag[0x1];
2198 + u8 lro_time_stamp[0x1];
2199 +- u8 reserved_0[0x6];
2200 ++ u8 reserved_0[0x3];
2201 ++ u8 self_lb_en_modifiable[0x1];
2202 ++ u8 reserved_1[0x2];
2203 + u8 max_lso_cap[0x5];
2204 +- u8 reserved_1[0x4];
2205 ++ u8 reserved_2[0x4];
2206 + u8 rss_ind_tbl_cap[0x4];
2207 +- u8 reserved_2[0x3];
2208 ++ u8 reserved_3[0x3];
2209 + u8 tunnel_lso_const_out_ip_id[0x1];
2210 +- u8 reserved_3[0x2];
2211 ++ u8 reserved_4[0x2];
2212 + u8 tunnel_statless_gre[0x1];
2213 + u8 tunnel_stateless_vxlan[0x1];
2214 +
2215 +- u8 reserved_4[0x20];
2216 ++ u8 reserved_5[0x20];
2217 +
2218 +- u8 reserved_5[0x10];
2219 ++ u8 reserved_6[0x10];
2220 + u8 lro_min_mss_size[0x10];
2221 +
2222 +- u8 reserved_6[0x120];
2223 ++ u8 reserved_7[0x120];
2224 +
2225 + u8 lro_timer_supported_periods[4][0x20];
2226 +
2227 +- u8 reserved_7[0x600];
2228 ++ u8 reserved_8[0x600];
2229 + };
2230 +
2231 + struct mlx5_ifc_roce_cap_bits {
2232 +@@ -4051,9 +4053,11 @@ struct mlx5_ifc_modify_tis_in_bits {
2233 + };
2234 +
2235 + struct mlx5_ifc_modify_tir_bitmask_bits {
2236 +- u8 reserved[0x20];
2237 ++ u8 reserved_0[0x20];
2238 +
2239 +- u8 reserved1[0x1f];
2240 ++ u8 reserved_1[0x1b];
2241 ++ u8 self_lb_en[0x1];
2242 ++ u8 reserved_2[0x3];
2243 + u8 lro[0x1];
2244 + };
2245 +
2246 +diff --git a/include/net/af_unix.h b/include/net/af_unix.h
2247 +index b36d837c701e..2a91a0561a47 100644
2248 +--- a/include/net/af_unix.h
2249 ++++ b/include/net/af_unix.h
2250 +@@ -62,6 +62,7 @@ struct unix_sock {
2251 + #define UNIX_GC_CANDIDATE 0
2252 + #define UNIX_GC_MAYBE_CYCLE 1
2253 + struct socket_wq peer_wq;
2254 ++ wait_queue_t peer_wake;
2255 + };
2256 +
2257 + static inline struct unix_sock *unix_sk(const struct sock *sk)
2258 +diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
2259 +index aaf9700fc9e5..fb961a576abe 100644
2260 +--- a/include/net/ip6_fib.h
2261 ++++ b/include/net/ip6_fib.h
2262 +@@ -167,7 +167,8 @@ static inline void rt6_update_expires(struct rt6_info *rt0, int timeout)
2263 +
2264 + static inline u32 rt6_get_cookie(const struct rt6_info *rt)
2265 + {
2266 +- if (rt->rt6i_flags & RTF_PCPU || unlikely(rt->dst.flags & DST_NOCACHE))
2267 ++ if (rt->rt6i_flags & RTF_PCPU ||
2268 ++ (unlikely(rt->dst.flags & DST_NOCACHE) && rt->dst.from))
2269 + rt = (struct rt6_info *)(rt->dst.from);
2270 +
2271 + return rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
2272 +diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
2273 +index fa915fa0f703..d49a8f8fae45 100644
2274 +--- a/include/net/ip6_tunnel.h
2275 ++++ b/include/net/ip6_tunnel.h
2276 +@@ -90,11 +90,12 @@ static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
2277 + err = ip6_local_out_sk(sk, skb);
2278 +
2279 + if (net_xmit_eval(err) == 0) {
2280 +- struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
2281 ++ struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats);
2282 + u64_stats_update_begin(&tstats->syncp);
2283 + tstats->tx_bytes += pkt_len;
2284 + tstats->tx_packets++;
2285 + u64_stats_update_end(&tstats->syncp);
2286 ++ put_cpu_ptr(tstats);
2287 + } else {
2288 + stats->tx_errors++;
2289 + stats->tx_aborted_errors++;
2290 +diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
2291 +index f6dafec9102c..62a750a6a8f8 100644
2292 +--- a/include/net/ip_tunnels.h
2293 ++++ b/include/net/ip_tunnels.h
2294 +@@ -287,12 +287,13 @@ static inline void iptunnel_xmit_stats(int err,
2295 + struct pcpu_sw_netstats __percpu *stats)
2296 + {
2297 + if (err > 0) {
2298 +- struct pcpu_sw_netstats *tstats = this_cpu_ptr(stats);
2299 ++ struct pcpu_sw_netstats *tstats = get_cpu_ptr(stats);
2300 +
2301 + u64_stats_update_begin(&tstats->syncp);
2302 + tstats->tx_bytes += err;
2303 + tstats->tx_packets++;
2304 + u64_stats_update_end(&tstats->syncp);
2305 ++ put_cpu_ptr(tstats);
2306 + } else if (err < 0) {
2307 + err_stats->tx_errors++;
2308 + err_stats->tx_aborted_errors++;
2309 +diff --git a/include/net/ipv6.h b/include/net/ipv6.h
2310 +index 711cca428cc8..b14e1581c477 100644
2311 +--- a/include/net/ipv6.h
2312 ++++ b/include/net/ipv6.h
2313 +@@ -205,6 +205,7 @@ extern rwlock_t ip6_ra_lock;
2314 + */
2315 +
2316 + struct ipv6_txoptions {
2317 ++ atomic_t refcnt;
2318 + /* Length of this structure */
2319 + int tot_len;
2320 +
2321 +@@ -217,7 +218,7 @@ struct ipv6_txoptions {
2322 + struct ipv6_opt_hdr *dst0opt;
2323 + struct ipv6_rt_hdr *srcrt; /* Routing Header */
2324 + struct ipv6_opt_hdr *dst1opt;
2325 +-
2326 ++ struct rcu_head rcu;
2327 + /* Option buffer, as read by IPV6_PKTOPTIONS, starts here. */
2328 + };
2329 +
2330 +@@ -252,6 +253,24 @@ struct ipv6_fl_socklist {
2331 + struct rcu_head rcu;
2332 + };
2333 +
2334 ++static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np)
2335 ++{
2336 ++ struct ipv6_txoptions *opt;
2337 ++
2338 ++ rcu_read_lock();
2339 ++ opt = rcu_dereference(np->opt);
2340 ++ if (opt && !atomic_inc_not_zero(&opt->refcnt))
2341 ++ opt = NULL;
2342 ++ rcu_read_unlock();
2343 ++ return opt;
2344 ++}
2345 ++
2346 ++static inline void txopt_put(struct ipv6_txoptions *opt)
2347 ++{
2348 ++ if (opt && atomic_dec_and_test(&opt->refcnt))
2349 ++ kfree_rcu(opt, rcu);
2350 ++}
2351 ++
2352 + struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label);
2353 + struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
2354 + struct ip6_flowlabel *fl,
2355 +@@ -490,6 +509,7 @@ struct ip6_create_arg {
2356 + u32 user;
2357 + const struct in6_addr *src;
2358 + const struct in6_addr *dst;
2359 ++ int iif;
2360 + u8 ecn;
2361 + };
2362 +
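The include/net/ipv6.h changes above, together with the earlier ipv6_pinfo hunk, move np->opt under RCU: ipv6_txoptions gains a refcount and an rcu_head, txopt_get() takes a reference only while the count is still non-zero (the atomic_inc_not_zero pattern), and txopt_put() frees through kfree_rcu() when the last reference drops. A compact userspace model of that acquire/release scheme, using C11 atomics in place of the kernel primitives (all names illustrative; free() stands in for the RCU-deferred kfree):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct txopt_sketch {
	atomic_int refcnt;
	int tot_len;
};

/* inc-not-zero: take a reference only while the object is still live */
struct txopt_sketch *txopt_get_sketch(struct txopt_sketch *opt)
{
	int old = opt ? atomic_load(&opt->refcnt) : 0;

	while (old != 0 &&
	       !atomic_compare_exchange_weak(&opt->refcnt, &old, old + 1))
		;	/* failed CAS reloads 'old' and retries */
	return old ? opt : NULL;
}

void txopt_put_sketch(struct txopt_sketch *opt)
{
	if (opt && atomic_fetch_sub(&opt->refcnt, 1) == 1)
		free(opt);	/* the kernel defers this via kfree_rcu() */
}

int main(void)
{
	struct txopt_sketch *opt = calloc(1, sizeof(*opt));

	atomic_store(&opt->refcnt, 1);			/* owner's reference */
	struct txopt_sketch *ref = txopt_get_sketch(opt);

	printf("got ref: %s\n", ref ? "yes" : "no");	/* yes */
	txopt_put_sketch(ref);				/* reader done */
	txopt_put_sketch(opt);				/* owner done: freed */
	return 0;
}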
2363 +diff --git a/include/net/ndisc.h b/include/net/ndisc.h
2364 +index aba5695fadb0..b3a7751251b4 100644
2365 +--- a/include/net/ndisc.h
2366 ++++ b/include/net/ndisc.h
2367 +@@ -182,8 +182,7 @@ int ndisc_rcv(struct sk_buff *skb);
2368 +
2369 + void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
2370 + const struct in6_addr *solicit,
2371 +- const struct in6_addr *daddr, const struct in6_addr *saddr,
2372 +- struct sk_buff *oskb);
2373 ++ const struct in6_addr *daddr, const struct in6_addr *saddr);
2374 +
2375 + void ndisc_send_rs(struct net_device *dev,
2376 + const struct in6_addr *saddr, const struct in6_addr *daddr);
2377 +diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
2378 +index 444faa89a55f..f1ad8f8fd4f1 100644
2379 +--- a/include/net/sch_generic.h
2380 ++++ b/include/net/sch_generic.h
2381 +@@ -61,6 +61,9 @@ struct Qdisc {
2382 + */
2383 + #define TCQ_F_WARN_NONWC (1 << 16)
2384 + #define TCQ_F_CPUSTATS 0x20 /* run using percpu statistics */
2385 ++#define TCQ_F_NOPARENT 0x40 /* root of its hierarchy :
2386 ++ * qdisc_tree_decrease_qlen() should stop.
2387 ++ */
2388 + u32 limit;
2389 + const struct Qdisc_ops *ops;
2390 + struct qdisc_size_table __rcu *stab;
2391 +diff --git a/include/net/switchdev.h b/include/net/switchdev.h
2392 +index 319baab3b48e..731c40e34bf2 100644
2393 +--- a/include/net/switchdev.h
2394 ++++ b/include/net/switchdev.h
2395 +@@ -272,7 +272,7 @@ static inline int switchdev_port_fdb_dump(struct sk_buff *skb,
2396 + struct net_device *filter_dev,
2397 + int idx)
2398 + {
2399 +- return -EOPNOTSUPP;
2400 ++ return idx;
2401 + }
2402 +
2403 + static inline void switchdev_port_fwd_mark_set(struct net_device *dev,
2404 +diff --git a/kernel/.gitignore b/kernel/.gitignore
2405 +index 790d83c7d160..b3097bde4e9c 100644
2406 +--- a/kernel/.gitignore
2407 ++++ b/kernel/.gitignore
2408 +@@ -5,4 +5,3 @@ config_data.h
2409 + config_data.gz
2410 + timeconst.h
2411 + hz.bc
2412 +-x509_certificate_list
2413 +diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
2414 +index 29ace107f236..7a0decf47110 100644
2415 +--- a/kernel/bpf/arraymap.c
2416 ++++ b/kernel/bpf/arraymap.c
2417 +@@ -104,7 +104,7 @@ static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
2418 + /* all elements already exist */
2419 + return -EEXIST;
2420 +
2421 +- memcpy(array->value + array->elem_size * index, value, array->elem_size);
2422 ++ memcpy(array->value + array->elem_size * index, value, map->value_size);
2423 + return 0;
2424 + }
2425 +
2426 +diff --git a/net/core/neighbour.c b/net/core/neighbour.c
2427 +index 2b515ba7e94f..c169bba44e05 100644
2428 +--- a/net/core/neighbour.c
2429 ++++ b/net/core/neighbour.c
2430 +@@ -2215,7 +2215,7 @@ static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2431 + ndm->ndm_pad2 = 0;
2432 + ndm->ndm_flags = pn->flags | NTF_PROXY;
2433 + ndm->ndm_type = RTN_UNICAST;
2434 +- ndm->ndm_ifindex = pn->dev->ifindex;
2435 ++ ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
2436 + ndm->ndm_state = NUD_NONE;
2437 +
2438 + if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2439 +@@ -2290,7 +2290,7 @@ static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2440 + if (h > s_h)
2441 + s_idx = 0;
2442 + for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2443 +- if (dev_net(n->dev) != net)
2444 ++ if (pneigh_net(n) != net)
2445 + continue;
2446 + if (idx < s_idx)
2447 + goto next;
2448 +diff --git a/net/core/scm.c b/net/core/scm.c
2449 +index 3b6899b7d810..8a1741b14302 100644
2450 +--- a/net/core/scm.c
2451 ++++ b/net/core/scm.c
2452 +@@ -305,6 +305,8 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
2453 + err = put_user(cmlen, &cm->cmsg_len);
2454 + if (!err) {
2455 + cmlen = CMSG_SPACE(i*sizeof(int));
2456 ++ if (msg->msg_controllen < cmlen)
2457 ++ cmlen = msg->msg_controllen;
2458 + msg->msg_control += cmlen;
2459 + msg->msg_controllen -= cmlen;
2460 + }
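The scm_detach_fds() hunk above stops msg->msg_control from being advanced past the end of the caller's buffer: the CMSG_SPACE()-rounded length is clamped to the remaining msg_controllen before the cursor and the remaining-space counter are adjusted. The arithmetic, reduced to a standalone sketch (types simplified):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t controllen = 20;	/* space left in the user's cmsg buffer */
	size_t cmlen = 24;	/* CMSG_SPACE()-rounded message length */

	if (controllen < cmlen)	/* the added clamp */
		cmlen = controllen;
	controllen -= cmlen;	/* can no longer underflow */
	printf("remaining controllen: %zu\n", controllen);	/* 0 */
	return 0;
}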
2461 +diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
2462 +index 5165571f397a..a0490508d213 100644
2463 +--- a/net/dccp/ipv6.c
2464 ++++ b/net/dccp/ipv6.c
2465 +@@ -202,7 +202,9 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req)
2466 + security_req_classify_flow(req, flowi6_to_flowi(&fl6));
2467 +
2468 +
2469 +- final_p = fl6_update_dst(&fl6, np->opt, &final);
2470 ++ rcu_read_lock();
2471 ++ final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final);
2472 ++ rcu_read_unlock();
2473 +
2474 + dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
2475 + if (IS_ERR(dst)) {
2476 +@@ -219,7 +221,10 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req)
2477 + &ireq->ir_v6_loc_addr,
2478 + &ireq->ir_v6_rmt_addr);
2479 + fl6.daddr = ireq->ir_v6_rmt_addr;
2480 +- err = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
2481 ++ rcu_read_lock();
2482 ++ err = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt),
2483 ++ np->tclass);
2484 ++ rcu_read_unlock();
2485 + err = net_xmit_eval(err);
2486 + }
2487 +
2488 +@@ -415,6 +420,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
2489 + {
2490 + struct inet_request_sock *ireq = inet_rsk(req);
2491 + struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
2492 ++ struct ipv6_txoptions *opt;
2493 + struct inet_sock *newinet;
2494 + struct dccp6_sock *newdp6;
2495 + struct sock *newsk;
2496 +@@ -534,13 +540,15 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
2497 + * Yes, keeping reference count would be much more clever, but we make
2498 + * one more one thing there: reattach optmem to newsk.
2499 + */
2500 +- if (np->opt != NULL)
2501 +- newnp->opt = ipv6_dup_options(newsk, np->opt);
2502 +-
2503 ++ opt = rcu_dereference(np->opt);
2504 ++ if (opt) {
2505 ++ opt = ipv6_dup_options(newsk, opt);
2506 ++ RCU_INIT_POINTER(newnp->opt, opt);
2507 ++ }
2508 + inet_csk(newsk)->icsk_ext_hdr_len = 0;
2509 +- if (newnp->opt != NULL)
2510 +- inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
2511 +- newnp->opt->opt_flen);
2512 ++ if (opt)
2513 ++ inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
2514 ++ opt->opt_flen;
2515 +
2516 + dccp_sync_mss(newsk, dst_mtu(dst));
2517 +
2518 +@@ -793,6 +801,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
2519 + struct ipv6_pinfo *np = inet6_sk(sk);
2520 + struct dccp_sock *dp = dccp_sk(sk);
2521 + struct in6_addr *saddr = NULL, *final_p, final;
2522 ++ struct ipv6_txoptions *opt;
2523 + struct flowi6 fl6;
2524 + struct dst_entry *dst;
2525 + int addr_type;
2526 +@@ -892,7 +901,8 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
2527 + fl6.fl6_sport = inet->inet_sport;
2528 + security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
2529 +
2530 +- final_p = fl6_update_dst(&fl6, np->opt, &final);
2531 ++ opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
2532 ++ final_p = fl6_update_dst(&fl6, opt, &final);
2533 +
2534 + dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
2535 + if (IS_ERR(dst)) {
2536 +@@ -912,9 +922,8 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
2537 + __ip6_dst_store(sk, dst, NULL, NULL);
2538 +
2539 + icsk->icsk_ext_hdr_len = 0;
2540 +- if (np->opt != NULL)
2541 +- icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
2542 +- np->opt->opt_nflen);
2543 ++ if (opt)
2544 ++ icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen;
2545 +
2546 + inet->inet_dport = usin->sin6_port;
2547 +
2548 +diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
2549 +index 8e8203d5c520..ef7e2c4342cb 100644
2550 +--- a/net/ipv4/ipmr.c
2551 ++++ b/net/ipv4/ipmr.c
2552 +@@ -134,7 +134,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2553 + struct mfc_cache *c, struct rtmsg *rtm);
2554 + static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
2555 + int cmd);
2556 +-static void mroute_clean_tables(struct mr_table *mrt);
2557 ++static void mroute_clean_tables(struct mr_table *mrt, bool all);
2558 + static void ipmr_expire_process(unsigned long arg);
2559 +
2560 + #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
2561 +@@ -350,7 +350,7 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
2562 + static void ipmr_free_table(struct mr_table *mrt)
2563 + {
2564 + del_timer_sync(&mrt->ipmr_expire_timer);
2565 +- mroute_clean_tables(mrt);
2566 ++ mroute_clean_tables(mrt, true);
2567 + kfree(mrt);
2568 + }
2569 +
2570 +@@ -1208,7 +1208,7 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
2571 + * Close the multicast socket, and clear the vif tables etc
2572 + */
2573 +
2574 +-static void mroute_clean_tables(struct mr_table *mrt)
2575 ++static void mroute_clean_tables(struct mr_table *mrt, bool all)
2576 + {
2577 + int i;
2578 + LIST_HEAD(list);
2579 +@@ -1217,8 +1217,9 @@ static void mroute_clean_tables(struct mr_table *mrt)
2580 + /* Shut down all active vif entries */
2581 +
2582 + for (i = 0; i < mrt->maxvif; i++) {
2583 +- if (!(mrt->vif_table[i].flags & VIFF_STATIC))
2584 +- vif_delete(mrt, i, 0, &list);
2585 ++ if (!all && (mrt->vif_table[i].flags & VIFF_STATIC))
2586 ++ continue;
2587 ++ vif_delete(mrt, i, 0, &list);
2588 + }
2589 + unregister_netdevice_many(&list);
2590 +
2591 +@@ -1226,7 +1227,7 @@ static void mroute_clean_tables(struct mr_table *mrt)
2592 +
2593 + for (i = 0; i < MFC_LINES; i++) {
2594 + list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
2595 +- if (c->mfc_flags & MFC_STATIC)
2596 ++ if (!all && (c->mfc_flags & MFC_STATIC))
2597 + continue;
2598 + list_del_rcu(&c->list);
2599 + mroute_netlink_event(mrt, c, RTM_DELROUTE);
2600 +@@ -1261,7 +1262,7 @@ static void mrtsock_destruct(struct sock *sk)
2601 + NETCONFA_IFINDEX_ALL,
2602 + net->ipv4.devconf_all);
2603 + RCU_INIT_POINTER(mrt->mroute_sk, NULL);
2604 +- mroute_clean_tables(mrt);
2605 ++ mroute_clean_tables(mrt, false);
2606 + }
2607 + }
2608 + rtnl_unlock();
2609 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
2610 +index a8f515bb19c4..0a2b61dbcd4e 100644
2611 +--- a/net/ipv4/tcp_input.c
2612 ++++ b/net/ipv4/tcp_input.c
2613 +@@ -4457,19 +4457,34 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int
2614 + int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
2615 + {
2616 + struct sk_buff *skb;
2617 ++ int err = -ENOMEM;
2618 ++ int data_len = 0;
2619 + bool fragstolen;
2620 +
2621 + if (size == 0)
2622 + return 0;
2623 +
2624 +- skb = alloc_skb(size, sk->sk_allocation);
2625 ++ if (size > PAGE_SIZE) {
2626 ++ int npages = min_t(size_t, size >> PAGE_SHIFT, MAX_SKB_FRAGS);
2627 ++
2628 ++ data_len = npages << PAGE_SHIFT;
2629 ++ size = data_len + (size & ~PAGE_MASK);
2630 ++ }
2631 ++ skb = alloc_skb_with_frags(size - data_len, data_len,
2632 ++ PAGE_ALLOC_COSTLY_ORDER,
2633 ++ &err, sk->sk_allocation);
2634 + if (!skb)
2635 + goto err;
2636 +
2637 ++ skb_put(skb, size - data_len);
2638 ++ skb->data_len = data_len;
2639 ++ skb->len = size;
2640 ++
2641 + if (tcp_try_rmem_schedule(sk, skb, skb->truesize))
2642 + goto err_free;
2643 +
2644 +- if (memcpy_from_msg(skb_put(skb, size), msg, size))
2645 ++ err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
2646 ++ if (err)
2647 + goto err_free;
2648 +
2649 + TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt;
2650 +@@ -4485,7 +4500,8 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
2651 + err_free:
2652 + kfree_skb(skb);
2653 + err:
2654 +- return -ENOMEM;
2655 ++ return err;
2656 ++
2657 + }
2658 +
2659 + static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
2660 +@@ -5643,6 +5659,7 @@ discard:
2661 + }
2662 +
2663 + tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
2664 ++ tp->copied_seq = tp->rcv_nxt;
2665 + tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;
2666 +
2667 + /* RFC1323: The window in SYN & SYN/ACK segments is
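The tcp_send_rcvq() hunk above avoids one huge contiguous kmalloc for large TCP_REPAIR queue writes: anything beyond a page keeps only the sub-page remainder in the skb's linear area, while whole pages (capped at MAX_SKB_FRAGS) go into page fragments via alloc_skb_with_frags(). A standalone sketch of the size split, with illustrative stand-ins for the kernel constants:

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define MAX_SKB_FRAGS 17	/* illustrative cap */

void split(size_t size, size_t *linear, size_t *data_len)
{
	*data_len = 0;
	if (size > PAGE_SIZE) {
		size_t npages = size >> PAGE_SHIFT;

		if (npages > MAX_SKB_FRAGS)
			npages = MAX_SKB_FRAGS;
		*data_len = npages << PAGE_SHIFT;	/* whole pages -> frags */
		size = *data_len + (size & ~PAGE_MASK);	/* truncate if capped */
	}
	*linear = size - *data_len;	/* sub-page tail stays linear */
}

int main(void)
{
	size_t lin, frag;

	split(3 * PAGE_SIZE + 100, &lin, &frag);
	printf("linear=%zu frag=%zu\n", lin, frag);	/* linear=100 frag=12288 */
	return 0;
}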
2668 +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
2669 +index 93898e093d4e..a7739c83aa84 100644
2670 +--- a/net/ipv4/tcp_ipv4.c
2671 ++++ b/net/ipv4/tcp_ipv4.c
2672 +@@ -922,7 +922,8 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
2673 + }
2674 +
2675 + md5sig = rcu_dereference_protected(tp->md5sig_info,
2676 +- sock_owned_by_user(sk));
2677 ++ sock_owned_by_user(sk) ||
2678 ++ lockdep_is_held(&sk->sk_lock.slock));
2679 + if (!md5sig) {
2680 + md5sig = kmalloc(sizeof(*md5sig), gfp);
2681 + if (!md5sig)
2682 +diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
2683 +index 7149ebc820c7..04f0a052b524 100644
2684 +--- a/net/ipv4/tcp_timer.c
2685 ++++ b/net/ipv4/tcp_timer.c
2686 +@@ -176,6 +176,18 @@ static int tcp_write_timeout(struct sock *sk)
2687 + syn_set = true;
2688 + } else {
2689 + if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) {
2690 ++ /* Some middle-boxes may black-hole Fast Open _after_
2691 ++ * the handshake. Therefore we conservatively disable
2692 ++ * Fast Open on this path on recurring timeouts with
2693 ++ * few or zero bytes acked after Fast Open.
2694 ++ */
2695 ++ if (tp->syn_data_acked &&
2696 ++ tp->bytes_acked <= tp->rx_opt.mss_clamp) {
2697 ++ tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
2698 ++ if (icsk->icsk_retransmits == sysctl_tcp_retries1)
2699 ++ NET_INC_STATS_BH(sock_net(sk),
2700 ++ LINUX_MIB_TCPFASTOPENACTIVEFAIL);
2701 ++ }
2702 + /* Black hole detection */
2703 + tcp_mtu_probing(icsk, sk);
2704 +
2705 +diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
2706 +index dd00828863a0..3939dd290c44 100644
2707 +--- a/net/ipv6/addrconf.c
2708 ++++ b/net/ipv6/addrconf.c
2709 +@@ -3628,7 +3628,7 @@ static void addrconf_dad_work(struct work_struct *w)
2710 +
2711 + /* send a neighbour solicitation for our addr */
2712 + addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
2713 +- ndisc_send_ns(ifp->idev->dev, NULL, &ifp->addr, &mcaddr, &in6addr_any, NULL);
2714 ++ ndisc_send_ns(ifp->idev->dev, NULL, &ifp->addr, &mcaddr, &in6addr_any);
2715 + out:
2716 + in6_ifa_put(ifp);
2717 + rtnl_unlock();
2718 +diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
2719 +index 44bb66bde0e2..38d66ddfb937 100644
2720 +--- a/net/ipv6/af_inet6.c
2721 ++++ b/net/ipv6/af_inet6.c
2722 +@@ -428,9 +428,11 @@ void inet6_destroy_sock(struct sock *sk)
2723 +
2724 + /* Free tx options */
2725 +
2726 +- opt = xchg(&np->opt, NULL);
2727 +- if (opt)
2728 +- sock_kfree_s(sk, opt, opt->tot_len);
2729 ++ opt = xchg((__force struct ipv6_txoptions **)&np->opt, NULL);
2730 ++ if (opt) {
2731 ++ atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
2732 ++ txopt_put(opt);
2733 ++ }
2734 + }
2735 + EXPORT_SYMBOL_GPL(inet6_destroy_sock);
2736 +
2737 +@@ -659,7 +661,10 @@ int inet6_sk_rebuild_header(struct sock *sk)
2738 + fl6.fl6_sport = inet->inet_sport;
2739 + security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
2740 +
2741 +- final_p = fl6_update_dst(&fl6, np->opt, &final);
2742 ++ rcu_read_lock();
2743 ++ final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt),
2744 ++ &final);
2745 ++ rcu_read_unlock();
2746 +
2747 + dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
2748 + if (IS_ERR(dst)) {
2749 +diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
2750 +index 9aadd57808a5..a42a673aa547 100644
2751 +--- a/net/ipv6/datagram.c
2752 ++++ b/net/ipv6/datagram.c
2753 +@@ -167,8 +167,10 @@ ipv4_connected:
2754 +
2755 + security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
2756 +
2757 +- opt = flowlabel ? flowlabel->opt : np->opt;
2758 ++ rcu_read_lock();
2759 ++ opt = flowlabel ? flowlabel->opt : rcu_dereference(np->opt);
2760 + final_p = fl6_update_dst(&fl6, opt, &final);
2761 ++ rcu_read_unlock();
2762 +
2763 + dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
2764 + err = 0;
2765 +diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
2766 +index ce203b0402be..ea7c4d64a00a 100644
2767 +--- a/net/ipv6/exthdrs.c
2768 ++++ b/net/ipv6/exthdrs.c
2769 +@@ -727,6 +727,7 @@ ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt)
2770 + *((char **)&opt2->dst1opt) += dif;
2771 + if (opt2->srcrt)
2772 + *((char **)&opt2->srcrt) += dif;
2773 ++ atomic_set(&opt2->refcnt, 1);
2774 + }
2775 + return opt2;
2776 + }
2777 +@@ -790,7 +791,7 @@ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
2778 + return ERR_PTR(-ENOBUFS);
2779 +
2780 + memset(opt2, 0, tot_len);
2781 +-
2782 ++ atomic_set(&opt2->refcnt, 1);
2783 + opt2->tot_len = tot_len;
2784 + p = (char *)(opt2 + 1);
2785 +
2786 +diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
2787 +index 6927f3fb5597..9beed302eb36 100644
2788 +--- a/net/ipv6/inet6_connection_sock.c
2789 ++++ b/net/ipv6/inet6_connection_sock.c
2790 +@@ -77,7 +77,9 @@ struct dst_entry *inet6_csk_route_req(struct sock *sk,
2791 + memset(fl6, 0, sizeof(*fl6));
2792 + fl6->flowi6_proto = IPPROTO_TCP;
2793 + fl6->daddr = ireq->ir_v6_rmt_addr;
2794 +- final_p = fl6_update_dst(fl6, np->opt, &final);
2795 ++ rcu_read_lock();
2796 ++ final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
2797 ++ rcu_read_unlock();
2798 + fl6->saddr = ireq->ir_v6_loc_addr;
2799 + fl6->flowi6_oif = ireq->ir_iif;
2800 + fl6->flowi6_mark = ireq->ir_mark;
2801 +@@ -207,7 +209,9 @@ static struct dst_entry *inet6_csk_route_socket(struct sock *sk,
2802 + fl6->fl6_dport = inet->inet_dport;
2803 + security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
2804 +
2805 +- final_p = fl6_update_dst(fl6, np->opt, &final);
2806 ++ rcu_read_lock();
2807 ++ final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
2808 ++ rcu_read_unlock();
2809 +
2810 + dst = __inet6_csk_dst_check(sk, np->dst_cookie);
2811 + if (!dst) {
2812 +@@ -240,7 +244,8 @@ int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl_unused
2813 + /* Restore final destination back after routing done */
2814 + fl6.daddr = sk->sk_v6_daddr;
2815 +
2816 +- res = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
2817 ++ res = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt),
2818 ++ np->tclass);
2819 + rcu_read_unlock();
2820 + return res;
2821 + }
2822 +diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
2823 +index eabffbb89795..137fca42aaa6 100644
2824 +--- a/net/ipv6/ip6_tunnel.c
2825 ++++ b/net/ipv6/ip6_tunnel.c
2826 +@@ -177,7 +177,7 @@ void ip6_tnl_dst_reset(struct ip6_tnl *t)
2827 + int i;
2828 +
2829 + for_each_possible_cpu(i)
2830 +- ip6_tnl_per_cpu_dst_set(raw_cpu_ptr(t->dst_cache), NULL);
2831 ++ ip6_tnl_per_cpu_dst_set(per_cpu_ptr(t->dst_cache, i), NULL);
2832 + }
2833 + EXPORT_SYMBOL_GPL(ip6_tnl_dst_reset);
2834 +
2835 +diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
2836 +index 0e004cc42a22..35eee72ab4af 100644
2837 +--- a/net/ipv6/ip6mr.c
2838 ++++ b/net/ipv6/ip6mr.c
2839 +@@ -118,7 +118,7 @@ static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
2840 + int cmd);
2841 + static int ip6mr_rtm_dumproute(struct sk_buff *skb,
2842 + struct netlink_callback *cb);
2843 +-static void mroute_clean_tables(struct mr6_table *mrt);
2844 ++static void mroute_clean_tables(struct mr6_table *mrt, bool all);
2845 + static void ipmr_expire_process(unsigned long arg);
2846 +
2847 + #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
2848 +@@ -334,7 +334,7 @@ static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
2849 + static void ip6mr_free_table(struct mr6_table *mrt)
2850 + {
2851 + del_timer_sync(&mrt->ipmr_expire_timer);
2852 +- mroute_clean_tables(mrt);
2853 ++ mroute_clean_tables(mrt, true);
2854 + kfree(mrt);
2855 + }
2856 +
2857 +@@ -1542,7 +1542,7 @@ static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
2858 + * Close the multicast socket, and clear the vif tables etc
2859 + */
2860 +
2861 +-static void mroute_clean_tables(struct mr6_table *mrt)
2862 ++static void mroute_clean_tables(struct mr6_table *mrt, bool all)
2863 + {
2864 + int i;
2865 + LIST_HEAD(list);
2866 +@@ -1552,8 +1552,9 @@ static void mroute_clean_tables(struct mr6_table *mrt)
2867 + * Shut down all active vif entries
2868 + */
2869 + for (i = 0; i < mrt->maxvif; i++) {
2870 +- if (!(mrt->vif6_table[i].flags & VIFF_STATIC))
2871 +- mif6_delete(mrt, i, &list);
2872 ++ if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC))
2873 ++ continue;
2874 ++ mif6_delete(mrt, i, &list);
2875 + }
2876 + unregister_netdevice_many(&list);
2877 +
2878 +@@ -1562,7 +1563,7 @@ static void mroute_clean_tables(struct mr6_table *mrt)
2879 + */
2880 + for (i = 0; i < MFC6_LINES; i++) {
2881 + list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) {
2882 +- if (c->mfc_flags & MFC_STATIC)
2883 ++ if (!all && (c->mfc_flags & MFC_STATIC))
2884 + continue;
2885 + write_lock_bh(&mrt_lock);
2886 + list_del(&c->list);
2887 +@@ -1625,7 +1626,7 @@ int ip6mr_sk_done(struct sock *sk)
2888 + net->ipv6.devconf_all);
2889 + write_unlock_bh(&mrt_lock);
2890 +
2891 +- mroute_clean_tables(mrt);
2892 ++ mroute_clean_tables(mrt, false);
2893 + err = 0;
2894 + break;
2895 + }
2896 +diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
2897 +index 63e6956917c9..4449ad1f8114 100644
2898 +--- a/net/ipv6/ipv6_sockglue.c
2899 ++++ b/net/ipv6/ipv6_sockglue.c
2900 +@@ -111,7 +111,8 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
2901 + icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
2902 + }
2903 + }
2904 +- opt = xchg(&inet6_sk(sk)->opt, opt);
2905 ++ opt = xchg((__force struct ipv6_txoptions **)&inet6_sk(sk)->opt,
2906 ++ opt);
2907 + sk_dst_reset(sk);
2908 +
2909 + return opt;
2910 +@@ -231,9 +232,12 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
2911 + sk->sk_socket->ops = &inet_dgram_ops;
2912 + sk->sk_family = PF_INET;
2913 + }
2914 +- opt = xchg(&np->opt, NULL);
2915 +- if (opt)
2916 +- sock_kfree_s(sk, opt, opt->tot_len);
2917 ++ opt = xchg((__force struct ipv6_txoptions **)&np->opt,
2918 ++ NULL);
2919 ++ if (opt) {
2920 ++ atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
2921 ++ txopt_put(opt);
2922 ++ }
2923 + pktopt = xchg(&np->pktoptions, NULL);
2924 + kfree_skb(pktopt);
2925 +
2926 +@@ -403,7 +407,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
2927 + if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW))
2928 + break;
2929 +
2930 +- opt = ipv6_renew_options(sk, np->opt, optname,
2931 ++ opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
2932 ++ opt = ipv6_renew_options(sk, opt, optname,
2933 + (struct ipv6_opt_hdr __user *)optval,
2934 + optlen);
2935 + if (IS_ERR(opt)) {
2936 +@@ -432,8 +437,10 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
2937 + retv = 0;
2938 + opt = ipv6_update_options(sk, opt);
2939 + sticky_done:
2940 +- if (opt)
2941 +- sock_kfree_s(sk, opt, opt->tot_len);
2942 ++ if (opt) {
2943 ++ atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
2944 ++ txopt_put(opt);
2945 ++ }
2946 + break;
2947 + }
2948 +
2949 +@@ -486,6 +493,7 @@ sticky_done:
2950 + break;
2951 +
2952 + memset(opt, 0, sizeof(*opt));
2953 ++ atomic_set(&opt->refcnt, 1);
2954 + opt->tot_len = sizeof(*opt) + optlen;
2955 + retv = -EFAULT;
2956 + if (copy_from_user(opt+1, optval, optlen))
2957 +@@ -502,8 +510,10 @@ update:
2958 + retv = 0;
2959 + opt = ipv6_update_options(sk, opt);
2960 + done:
2961 +- if (opt)
2962 +- sock_kfree_s(sk, opt, opt->tot_len);
2963 ++ if (opt) {
2964 ++ atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
2965 ++ txopt_put(opt);
2966 ++ }
2967 + break;
2968 + }
2969 + case IPV6_UNICAST_HOPS:
2970 +@@ -1110,10 +1120,11 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
2971 + case IPV6_RTHDR:
2972 + case IPV6_DSTOPTS:
2973 + {
2974 ++ struct ipv6_txoptions *opt;
2975 +
2976 + lock_sock(sk);
2977 +- len = ipv6_getsockopt_sticky(sk, np->opt,
2978 +- optname, optval, len);
2979 ++ opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
2980 ++ len = ipv6_getsockopt_sticky(sk, opt, optname, optval, len);
2981 + release_sock(sk);
2982 + /* check if ipv6_getsockopt_sticky() returns err code */
2983 + if (len < 0)
2984 +diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
2985 +index 083b2927fc67..41e3b5ee8d0b 100644
2986 +--- a/net/ipv6/mcast.c
2987 ++++ b/net/ipv6/mcast.c
2988 +@@ -1651,7 +1651,6 @@ out:
2989 + if (!err) {
2990 + ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
2991 + ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
2992 +- IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, payload_len);
2993 + } else {
2994 + IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
2995 + }
2996 +@@ -2014,7 +2013,6 @@ out:
2997 + if (!err) {
2998 + ICMP6MSGOUT_INC_STATS(net, idev, type);
2999 + ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
3000 +- IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, full_len);
3001 + } else
3002 + IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
3003 +
3004 +diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
3005 +index 64a71354b069..9ad46cd7930d 100644
3006 +--- a/net/ipv6/ndisc.c
3007 ++++ b/net/ipv6/ndisc.c
3008 +@@ -553,8 +553,7 @@ static void ndisc_send_unsol_na(struct net_device *dev)
3009 +
3010 + void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
3011 + const struct in6_addr *solicit,
3012 +- const struct in6_addr *daddr, const struct in6_addr *saddr,
3013 +- struct sk_buff *oskb)
3014 ++ const struct in6_addr *daddr, const struct in6_addr *saddr)
3015 + {
3016 + struct sk_buff *skb;
3017 + struct in6_addr addr_buf;
3018 +@@ -590,9 +589,6 @@ void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
3019 + ndisc_fill_addr_option(skb, ND_OPT_SOURCE_LL_ADDR,
3020 + dev->dev_addr);
3021 +
3022 +- if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE) && oskb)
3023 +- skb_dst_copy(skb, oskb);
3024 +-
3025 + ndisc_send_skb(skb, daddr, saddr);
3026 + }
3027 +
3028 +@@ -679,12 +675,12 @@ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb)
3029 + "%s: trying to ucast probe in NUD_INVALID: %pI6\n",
3030 + __func__, target);
3031 + }
3032 +- ndisc_send_ns(dev, neigh, target, target, saddr, skb);
3033 ++ ndisc_send_ns(dev, neigh, target, target, saddr);
3034 + } else if ((probes -= NEIGH_VAR(neigh->parms, APP_PROBES)) < 0) {
3035 + neigh_app_ns(neigh);
3036 + } else {
3037 + addrconf_addr_solict_mult(target, &mcaddr);
3038 +- ndisc_send_ns(dev, NULL, target, &mcaddr, saddr, skb);
3039 ++ ndisc_send_ns(dev, NULL, target, &mcaddr, saddr);
3040 + }
3041 + }
3042 +
3043 +diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
3044 +index c7196ad1d69f..dc50143f50f2 100644
3045 +--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
3046 ++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
3047 +@@ -190,7 +190,7 @@ static void nf_ct_frag6_expire(unsigned long data)
3048 + /* Creation primitives. */
3049 + static inline struct frag_queue *fq_find(struct net *net, __be32 id,
3050 + u32 user, struct in6_addr *src,
3051 +- struct in6_addr *dst, u8 ecn)
3052 ++ struct in6_addr *dst, int iif, u8 ecn)
3053 + {
3054 + struct inet_frag_queue *q;
3055 + struct ip6_create_arg arg;
3056 +@@ -200,6 +200,7 @@ static inline struct frag_queue *fq_find(struct net *net, __be32 id,
3057 + arg.user = user;
3058 + arg.src = src;
3059 + arg.dst = dst;
3060 ++ arg.iif = iif;
3061 + arg.ecn = ecn;
3062 +
3063 + local_bh_disable();
3064 +@@ -603,7 +604,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
3065 + fhdr = (struct frag_hdr *)skb_transport_header(clone);
3066 +
3067 + fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr,
3068 +- ip6_frag_ecn(hdr));
3069 ++ skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
3070 + if (fq == NULL) {
3071 + pr_debug("Can't find and can't create new queue\n");
3072 + goto ret_orig;
3073 +diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
3074 +index fdbada1569a3..fe977299551e 100644
3075 +--- a/net/ipv6/raw.c
3076 ++++ b/net/ipv6/raw.c
3077 +@@ -732,6 +732,7 @@ static int raw6_getfrag(void *from, char *to, int offset, int len, int odd,
3078 +
3079 + static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
3080 + {
3081 ++ struct ipv6_txoptions *opt_to_free = NULL;
3082 + struct ipv6_txoptions opt_space;
3083 + DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
3084 + struct in6_addr *daddr, *final_p, final;
3085 +@@ -838,8 +839,10 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
3086 + if (!(opt->opt_nflen|opt->opt_flen))
3087 + opt = NULL;
3088 + }
3089 +- if (!opt)
3090 +- opt = np->opt;
3091 ++ if (!opt) {
3092 ++ opt = txopt_get(np);
3093 ++ opt_to_free = opt;
3094 ++ }
3095 + if (flowlabel)
3096 + opt = fl6_merge_options(&opt_space, flowlabel, opt);
3097 + opt = ipv6_fixup_options(&opt_space, opt);
3098 +@@ -905,6 +908,7 @@ done:
3099 + dst_release(dst);
3100 + out:
3101 + fl6_sock_release(flowlabel);
3102 ++ txopt_put(opt_to_free);
3103 + return err < 0 ? err : len;
3104 + do_confirm:
3105 + dst_confirm(dst);
3106 +diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
3107 +index f1159bb76e0a..04013a910ce5 100644
3108 +--- a/net/ipv6/reassembly.c
3109 ++++ b/net/ipv6/reassembly.c
3110 +@@ -108,7 +108,10 @@ bool ip6_frag_match(const struct inet_frag_queue *q, const void *a)
3111 + return fq->id == arg->id &&
3112 + fq->user == arg->user &&
3113 + ipv6_addr_equal(&fq->saddr, arg->src) &&
3114 +- ipv6_addr_equal(&fq->daddr, arg->dst);
3115 ++ ipv6_addr_equal(&fq->daddr, arg->dst) &&
3116 ++ (arg->iif == fq->iif ||
3117 ++ !(ipv6_addr_type(arg->dst) & (IPV6_ADDR_MULTICAST |
3118 ++ IPV6_ADDR_LINKLOCAL)));
3119 + }
3120 + EXPORT_SYMBOL(ip6_frag_match);
3121 +
3122 +@@ -180,7 +183,7 @@ static void ip6_frag_expire(unsigned long data)
3123 +
3124 + static struct frag_queue *
3125 + fq_find(struct net *net, __be32 id, const struct in6_addr *src,
3126 +- const struct in6_addr *dst, u8 ecn)
3127 ++ const struct in6_addr *dst, int iif, u8 ecn)
3128 + {
3129 + struct inet_frag_queue *q;
3130 + struct ip6_create_arg arg;
3131 +@@ -190,6 +193,7 @@ fq_find(struct net *net, __be32 id, const struct in6_addr *src,
3132 + arg.user = IP6_DEFRAG_LOCAL_DELIVER;
3133 + arg.src = src;
3134 + arg.dst = dst;
3135 ++ arg.iif = iif;
3136 + arg.ecn = ecn;
3137 +
3138 + hash = inet6_hash_frag(id, src, dst);
3139 +@@ -551,7 +555,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
3140 + }
3141 +
3142 + fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
3143 +- ip6_frag_ecn(hdr));
3144 ++ skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
3145 + if (fq) {
3146 + int ret;
3147 +
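
Both reassembly fixes (nf_conntrack_reasm.c above and reassembly.c here) add the receiving ifindex to the fragment-queue key, and ip6_frag_match() enforces it only for multicast or link-local destinations, the cases where the address alone does not identify a single link. Fragments of one (id, src, dst) tuple arriving on different interfaces must not be spliced into a single datagram. A compressed sketch of the resulting predicate, with types reduced to the fields that matter:

    #include <stdbool.h>
    #include <string.h>

    struct frag_key {
        unsigned char src[16], dst[16];
        unsigned int id, user;
        int iif;            /* ifindex the first fragment arrived on */
    };

    /* dst_scoped: destination is multicast or link-local, so the address
     * alone is ambiguous and the interface becomes part of the identity.
     */
    static bool frag_match(const struct frag_key *q,
                           const struct frag_key *arg, bool dst_scoped)
    {
        if (q->id != arg->id || q->user != arg->user)
            return false;
        if (memcmp(q->src, arg->src, 16) || memcmp(q->dst, arg->dst, 16))
            return false;
        return !dst_scoped || q->iif == arg->iif;
    }

The fq_find() callers default to ifindex 0 when skb->dev is NULL, a defensive fallback for packets without a receiving device.
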
3148 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
3149 +index 946880ad48ac..fd0e6746d0cf 100644
3150 +--- a/net/ipv6/route.c
3151 ++++ b/net/ipv6/route.c
3152 +@@ -403,6 +403,14 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
3153 + }
3154 + }
3155 +
3156 ++static bool __rt6_check_expired(const struct rt6_info *rt)
3157 ++{
3158 ++ if (rt->rt6i_flags & RTF_EXPIRES)
3159 ++ return time_after(jiffies, rt->dst.expires);
3160 ++ else
3161 ++ return false;
3162 ++}
3163 ++
3164 + static bool rt6_check_expired(const struct rt6_info *rt)
3165 + {
3166 + if (rt->rt6i_flags & RTF_EXPIRES) {
3167 +@@ -538,7 +546,7 @@ static void rt6_probe_deferred(struct work_struct *w)
3168 + container_of(w, struct __rt6_probe_work, work);
3169 +
3170 + addrconf_addr_solict_mult(&work->target, &mcaddr);
3171 +- ndisc_send_ns(work->dev, NULL, &work->target, &mcaddr, NULL, NULL);
3172 ++ ndisc_send_ns(work->dev, NULL, &work->target, &mcaddr, NULL);
3173 + dev_put(work->dev);
3174 + kfree(work);
3175 + }
3176 +@@ -1270,7 +1278,8 @@ static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie)
3177 +
3178 + static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt, u32 cookie)
3179 + {
3180 +- if (rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
3181 ++ if (!__rt6_check_expired(rt) &&
3182 ++ rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
3183 + rt6_check((struct rt6_info *)(rt->dst.from), cookie))
3184 + return &rt->dst;
3185 + else
3186 +@@ -1290,7 +1299,8 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
3187 +
3188 + rt6_dst_from_metrics_check(rt);
3189 +
3190 +- if ((rt->rt6i_flags & RTF_PCPU) || unlikely(dst->flags & DST_NOCACHE))
3191 ++ if (rt->rt6i_flags & RTF_PCPU ||
3192 ++ (unlikely(dst->flags & DST_NOCACHE) && rt->dst.from))
3193 + return rt6_dst_from_check(rt, cookie);
3194 + else
3195 + return rt6_check(rt, cookie);
3196 +@@ -1340,6 +1350,12 @@ static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
3197 + rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
3198 + }
3199 +
3200 ++static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
3201 ++{
3202 ++ return !(rt->rt6i_flags & RTF_CACHE) &&
3203 ++ (rt->rt6i_flags & RTF_PCPU || rt->rt6i_node);
3204 ++}
3205 ++
3206 + static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
3207 + const struct ipv6hdr *iph, u32 mtu)
3208 + {
3209 +@@ -1353,7 +1369,7 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
3210 + if (mtu >= dst_mtu(dst))
3211 + return;
3212 +
3213 +- if (rt6->rt6i_flags & RTF_CACHE) {
3214 ++ if (!rt6_cache_allowed_for_pmtu(rt6)) {
3215 + rt6_do_update_pmtu(rt6, mtu);
3216 + } else {
3217 + const struct in6_addr *daddr, *saddr;
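
Two related route.c changes land here: rt6_dst_from_check() now rejects a cached clone whose own RTF_EXPIRES deadline has passed (previously only the parent route was validated), and __ip6_rt_update_pmtu() stores a learned MTU only in routes where rt6_cache_allowed_for_pmtu() says caching is safe. The expiry test relies on the kernel's wrap-safe time_after(); a standalone equivalent of that comparison, with hypothetical names:

    #include <stdbool.h>

    #define RTF_EXPIRES 0x00400000u

    struct rt {
        unsigned int flags;
        unsigned long expires;   /* jiffies-style free-running tick */
    };

    /* Wrap-safe "a is later than b" for tick counters; this is what the
     * kernel's time_after(a, b) expands to, modulo types.
     */
    static bool tick_after(unsigned long a, unsigned long b)
    {
        return (long)(b - a) < 0;
    }

    static bool rt_expired(const struct rt *rt, unsigned long now)
    {
        return (rt->flags & RTF_EXPIRES) && tick_after(now, rt->expires);
    }

With the extra !__rt6_check_expired() guard, an expired per-CPU or uncached clone is rebuilt on the next lookup instead of being served stale.
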
3218 +diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
3219 +index 0909f4e0d53c..f30bfdcdea54 100644
3220 +--- a/net/ipv6/syncookies.c
3221 ++++ b/net/ipv6/syncookies.c
3222 +@@ -225,7 +225,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
3223 + memset(&fl6, 0, sizeof(fl6));
3224 + fl6.flowi6_proto = IPPROTO_TCP;
3225 + fl6.daddr = ireq->ir_v6_rmt_addr;
3226 +- final_p = fl6_update_dst(&fl6, np->opt, &final);
3227 ++ final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final);
3228 + fl6.saddr = ireq->ir_v6_loc_addr;
3229 + fl6.flowi6_oif = sk->sk_bound_dev_if;
3230 + fl6.flowi6_mark = ireq->ir_mark;
3231 +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
3232 +index 97d9314ea361..9e9b77bd2d0a 100644
3233 +--- a/net/ipv6/tcp_ipv6.c
3234 ++++ b/net/ipv6/tcp_ipv6.c
3235 +@@ -120,6 +120,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
3236 + struct ipv6_pinfo *np = inet6_sk(sk);
3237 + struct tcp_sock *tp = tcp_sk(sk);
3238 + struct in6_addr *saddr = NULL, *final_p, final;
3239 ++ struct ipv6_txoptions *opt;
3240 + struct flowi6 fl6;
3241 + struct dst_entry *dst;
3242 + int addr_type;
3243 +@@ -235,7 +236,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
3244 + fl6.fl6_dport = usin->sin6_port;
3245 + fl6.fl6_sport = inet->inet_sport;
3246 +
3247 +- final_p = fl6_update_dst(&fl6, np->opt, &final);
3248 ++ opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
3249 ++ final_p = fl6_update_dst(&fl6, opt, &final);
3250 +
3251 + security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
3252 +
3253 +@@ -263,9 +265,9 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
3254 + tcp_fetch_timewait_stamp(sk, dst);
3255 +
3256 + icsk->icsk_ext_hdr_len = 0;
3257 +- if (np->opt)
3258 +- icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
3259 +- np->opt->opt_nflen);
3260 ++ if (opt)
3261 ++ icsk->icsk_ext_hdr_len = opt->opt_flen +
3262 ++ opt->opt_nflen;
3263 +
3264 + tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
3265 +
3266 +@@ -461,7 +463,8 @@ static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
3267 + fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
3268 +
3269 + skb_set_queue_mapping(skb, queue_mapping);
3270 +- err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
3271 ++ err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt),
3272 ++ np->tclass);
3273 + err = net_xmit_eval(err);
3274 + }
3275 +
3276 +@@ -991,6 +994,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
3277 + struct inet_request_sock *ireq;
3278 + struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
3279 + struct tcp6_sock *newtcp6sk;
3280 ++ struct ipv6_txoptions *opt;
3281 + struct inet_sock *newinet;
3282 + struct tcp_sock *newtp;
3283 + struct sock *newsk;
3284 +@@ -1126,13 +1130,15 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
3285 + but we make one more one thing there: reattach optmem
3286 + to newsk.
3287 + */
3288 +- if (np->opt)
3289 +- newnp->opt = ipv6_dup_options(newsk, np->opt);
3290 +-
3291 ++ opt = rcu_dereference(np->opt);
3292 ++ if (opt) {
3293 ++ opt = ipv6_dup_options(newsk, opt);
3294 ++ RCU_INIT_POINTER(newnp->opt, opt);
3295 ++ }
3296 + inet_csk(newsk)->icsk_ext_hdr_len = 0;
3297 +- if (newnp->opt)
3298 +- inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
3299 +- newnp->opt->opt_flen);
3300 ++ if (opt)
3301 ++ inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
3302 ++ opt->opt_flen;
3303 +
3304 + tcp_ca_openreq_child(newsk, dst);
3305 +
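
Note the two different accessors in this file: tcp_v6_connect() uses rcu_dereference_protected(np->opt, sock_owned_by_user(sk)), asserting that the socket lock, not an RCU read section, keeps the pointer stable, while tcp_v6_syn_recv_sock() duplicates the options and publishes them into the not-yet-visible child socket with RCU_INIT_POINTER(). A plain-C11 model of the three idioms (acquire/release atomics standing in for the RCU annotations; names are illustrative):

    #include <stdatomic.h>

    struct opts { int flen, nflen; };

    static _Atomic(struct opts *) np_opt;   /* the shared pointer */

    /* Lock-holder read: stability comes from an external lock, so a
     * relaxed load suffices (kernel: rcu_dereference_protected()).
     */
    static struct opts *opt_read_locked(void)
    {
        return atomic_load_explicit(&np_opt, memory_order_relaxed);
    }

    /* Lockless read: pairs with the publisher's release store
     * (kernel: rcu_dereference() under rcu_read_lock()).
     */
    static struct opts *opt_read_rcu(void)
    {
        return atomic_load_explicit(&np_opt, memory_order_acquire);
    }

    /* Publish a fully initialized block (kernel: rcu_assign_pointer(),
     * or RCU_INIT_POINTER() when no reader can see the object yet).
     */
    static void opt_publish(struct opts *newopt)
    {
        atomic_store_explicit(&np_opt, newopt, memory_order_release);
    }

The lockdep condition in rcu_dereference_protected() is what turns "I hold the right lock here" from a comment into a checkable claim.
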
3306 +diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
3307 +index 0aba654f5b91..8379fc2f4b1d 100644
3308 +--- a/net/ipv6/udp.c
3309 ++++ b/net/ipv6/udp.c
3310 +@@ -1107,6 +1107,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
3311 + DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
3312 + struct in6_addr *daddr, *final_p, final;
3313 + struct ipv6_txoptions *opt = NULL;
3314 ++ struct ipv6_txoptions *opt_to_free = NULL;
3315 + struct ip6_flowlabel *flowlabel = NULL;
3316 + struct flowi6 fl6;
3317 + struct dst_entry *dst;
3318 +@@ -1260,8 +1261,10 @@ do_udp_sendmsg:
3319 + opt = NULL;
3320 + connected = 0;
3321 + }
3322 +- if (!opt)
3323 +- opt = np->opt;
3324 ++ if (!opt) {
3325 ++ opt = txopt_get(np);
3326 ++ opt_to_free = opt;
3327 ++ }
3328 + if (flowlabel)
3329 + opt = fl6_merge_options(&opt_space, flowlabel, opt);
3330 + opt = ipv6_fixup_options(&opt_space, opt);
3331 +@@ -1370,6 +1373,7 @@ release_dst:
3332 + out:
3333 + dst_release(dst);
3334 + fl6_sock_release(flowlabel);
3335 ++ txopt_put(opt_to_free);
3336 + if (!err)
3337 + return len;
3338 + /*
3339 +diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
3340 +index d1ded3777815..0ce9da948ad7 100644
3341 +--- a/net/l2tp/l2tp_ip6.c
3342 ++++ b/net/l2tp/l2tp_ip6.c
3343 +@@ -486,6 +486,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
3344 + DECLARE_SOCKADDR(struct sockaddr_l2tpip6 *, lsa, msg->msg_name);
3345 + struct in6_addr *daddr, *final_p, final;
3346 + struct ipv6_pinfo *np = inet6_sk(sk);
3347 ++ struct ipv6_txoptions *opt_to_free = NULL;
3348 + struct ipv6_txoptions *opt = NULL;
3349 + struct ip6_flowlabel *flowlabel = NULL;
3350 + struct dst_entry *dst = NULL;
3351 +@@ -575,8 +576,10 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
3352 + opt = NULL;
3353 + }
3354 +
3355 +- if (opt == NULL)
3356 +- opt = np->opt;
3357 ++ if (!opt) {
3358 ++ opt = txopt_get(np);
3359 ++ opt_to_free = opt;
3360 ++ }
3361 + if (flowlabel)
3362 + opt = fl6_merge_options(&opt_space, flowlabel, opt);
3363 + opt = ipv6_fixup_options(&opt_space, opt);
3364 +@@ -631,6 +634,7 @@ done:
3365 + dst_release(dst);
3366 + out:
3367 + fl6_sock_release(flowlabel);
3368 ++ txopt_put(opt_to_free);
3369 +
3370 + return err < 0 ? err : len;
3371 +
3372 +diff --git a/net/openvswitch/dp_notify.c b/net/openvswitch/dp_notify.c
3373 +index a7a80a6b77b0..653d073bae45 100644
3374 +--- a/net/openvswitch/dp_notify.c
3375 ++++ b/net/openvswitch/dp_notify.c
3376 +@@ -58,7 +58,7 @@ void ovs_dp_notify_wq(struct work_struct *work)
3377 + struct hlist_node *n;
3378 +
3379 + hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node) {
3380 +- if (vport->ops->type != OVS_VPORT_TYPE_NETDEV)
3381 ++ if (vport->ops->type == OVS_VPORT_TYPE_INTERNAL)
3382 + continue;
3383 +
3384 + if (!(vport->dev->priv_flags & IFF_OVS_DATAPATH))
3385 +diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
3386 +index f7e8dcce7ada..ac14c488669c 100644
3387 +--- a/net/openvswitch/vport-netdev.c
3388 ++++ b/net/openvswitch/vport-netdev.c
3389 +@@ -180,9 +180,13 @@ void ovs_netdev_tunnel_destroy(struct vport *vport)
3390 + if (vport->dev->priv_flags & IFF_OVS_DATAPATH)
3391 + ovs_netdev_detach_dev(vport);
3392 +
3393 +- /* Early release so we can unregister the device */
3394 ++ /* We can be invoked by both explicit vport deletion and
3395 ++ * underlying netdev deregistration; delete the link only
3396 ++ * if it's not already shutting down.
3397 ++ */
3398 ++ if (vport->dev->reg_state == NETREG_REGISTERED)
3399 ++ rtnl_delete_link(vport->dev);
3400 + dev_put(vport->dev);
3401 +- rtnl_delete_link(vport->dev);
3402 + vport->dev = NULL;
3403 + rtnl_unlock();
3404 +
3405 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
3406 +index 27b2898f275c..4695a36eeca3 100644
3407 +--- a/net/packet/af_packet.c
3408 ++++ b/net/packet/af_packet.c
3409 +@@ -1741,6 +1741,20 @@ static void fanout_release(struct sock *sk)
3410 + kfree_rcu(po->rollover, rcu);
3411 + }
3412 +
3413 ++static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
3414 ++ struct sk_buff *skb)
3415 ++{
3416 ++ /* Earlier code assumed this would be a VLAN pkt, double-check
3417 ++ * this now that we have the actual packet in hand. We can only
3418 ++ * do this check on Ethernet devices.
3419 ++ */
3420 ++ if (unlikely(dev->type != ARPHRD_ETHER))
3421 ++ return false;
3422 ++
3423 ++ skb_reset_mac_header(skb);
3424 ++ return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
3425 ++}
3426 ++
3427 + static const struct proto_ops packet_ops;
3428 +
3429 + static const struct proto_ops packet_ops_spkt;
3430 +@@ -1902,18 +1916,10 @@ retry:
3431 + goto retry;
3432 + }
3433 +
3434 +- if (len > (dev->mtu + dev->hard_header_len + extra_len)) {
3435 +- /* Earlier code assumed this would be a VLAN pkt,
3436 +- * double-check this now that we have the actual
3437 +- * packet in hand.
3438 +- */
3439 +- struct ethhdr *ehdr;
3440 +- skb_reset_mac_header(skb);
3441 +- ehdr = eth_hdr(skb);
3442 +- if (ehdr->h_proto != htons(ETH_P_8021Q)) {
3443 +- err = -EMSGSIZE;
3444 +- goto out_unlock;
3445 +- }
3446 ++ if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
3447 ++ !packet_extra_vlan_len_allowed(dev, skb)) {
3448 ++ err = -EMSGSIZE;
3449 ++ goto out_unlock;
3450 + }
3451 +
3452 + skb->protocol = proto;
3453 +@@ -2332,6 +2338,15 @@ static bool ll_header_truncated(const struct net_device *dev, int len)
3454 + return false;
3455 + }
3456 +
3457 ++static void tpacket_set_protocol(const struct net_device *dev,
3458 ++ struct sk_buff *skb)
3459 ++{
3460 ++ if (dev->type == ARPHRD_ETHER) {
3461 ++ skb_reset_mac_header(skb);
3462 ++ skb->protocol = eth_hdr(skb)->h_proto;
3463 ++ }
3464 ++}
3465 ++
3466 + static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
3467 + void *frame, struct net_device *dev, int size_max,
3468 + __be16 proto, unsigned char *addr, int hlen)
3469 +@@ -2368,8 +2383,6 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
3470 + skb_reserve(skb, hlen);
3471 + skb_reset_network_header(skb);
3472 +
3473 +- if (!packet_use_direct_xmit(po))
3474 +- skb_probe_transport_header(skb, 0);
3475 + if (unlikely(po->tp_tx_has_off)) {
3476 + int off_min, off_max, off;
3477 + off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
3478 +@@ -2415,6 +2428,8 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
3479 + dev->hard_header_len);
3480 + if (unlikely(err))
3481 + return err;
3482 ++ if (!skb->protocol)
3483 ++ tpacket_set_protocol(dev, skb);
3484 +
3485 + data += dev->hard_header_len;
3486 + to_write -= dev->hard_header_len;
3487 +@@ -2449,6 +2464,8 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
3488 + len = ((to_write > len_max) ? len_max : to_write);
3489 + }
3490 +
3491 ++ skb_probe_transport_header(skb, 0);
3492 ++
3493 + return tp_len;
3494 + }
3495 +
3496 +@@ -2493,12 +2510,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
3497 + if (unlikely(!(dev->flags & IFF_UP)))
3498 + goto out_put;
3499 +
3500 +- reserve = dev->hard_header_len + VLAN_HLEN;
3501 ++ if (po->sk.sk_socket->type == SOCK_RAW)
3502 ++ reserve = dev->hard_header_len;
3503 + size_max = po->tx_ring.frame_size
3504 + - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
3505 +
3506 +- if (size_max > dev->mtu + reserve)
3507 +- size_max = dev->mtu + reserve;
3508 ++ if (size_max > dev->mtu + reserve + VLAN_HLEN)
3509 ++ size_max = dev->mtu + reserve + VLAN_HLEN;
3510 +
3511 + do {
3512 + ph = packet_current_frame(po, &po->tx_ring,
3513 +@@ -2525,18 +2543,10 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
3514 + tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
3515 + addr, hlen);
3516 + if (likely(tp_len >= 0) &&
3517 +- tp_len > dev->mtu + dev->hard_header_len) {
3518 +- struct ethhdr *ehdr;
3519 +- /* Earlier code assumed this would be a VLAN pkt,
3520 +- * double-check this now that we have the actual
3521 +- * packet in hand.
3522 +- */
3523 ++ tp_len > dev->mtu + reserve &&
3524 ++ !packet_extra_vlan_len_allowed(dev, skb))
3525 ++ tp_len = -EMSGSIZE;
3526 +
3527 +- skb_reset_mac_header(skb);
3528 +- ehdr = eth_hdr(skb);
3529 +- if (ehdr->h_proto != htons(ETH_P_8021Q))
3530 +- tp_len = -EMSGSIZE;
3531 +- }
3532 + if (unlikely(tp_len < 0)) {
3533 + if (po->tp_loss) {
3534 + __packet_set_status(po, ph,
3535 +@@ -2757,18 +2767,10 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
3536 +
3537 + sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
3538 +
3539 +- if (!gso_type && (len > dev->mtu + reserve + extra_len)) {
3540 +- /* Earlier code assumed this would be a VLAN pkt,
3541 +- * double-check this now that we have the actual
3542 +- * packet in hand.
3543 +- */
3544 +- struct ethhdr *ehdr;
3545 +- skb_reset_mac_header(skb);
3546 +- ehdr = eth_hdr(skb);
3547 +- if (ehdr->h_proto != htons(ETH_P_8021Q)) {
3548 +- err = -EMSGSIZE;
3549 +- goto out_free;
3550 +- }
3551 ++ if (!gso_type && (len > dev->mtu + reserve + extra_len) &&
3552 ++ !packet_extra_vlan_len_allowed(dev, skb)) {
3553 ++ err = -EMSGSIZE;
3554 ++ goto out_free;
3555 + }
3556 +
3557 + skb->protocol = proto;
3558 +@@ -2799,8 +2801,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
3559 + len += vnet_hdr_len;
3560 + }
3561 +
3562 +- if (!packet_use_direct_xmit(po))
3563 +- skb_probe_transport_header(skb, reserve);
3564 ++ skb_probe_transport_header(skb, reserve);
3565 ++
3566 + if (unlikely(extra_len == 4))
3567 + skb->no_fcs = 1;
3568 +
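
The three af_packet send paths now share packet_extra_vlan_len_allowed(), and tpacket_snd() reserves the link-layer header only for SOCK_RAW sockets, while the VLAN allowance moves into size_max and is verified after the frame is built. The arithmetic is easiest to see with numbers: MTU 1500 plus a 14-byte Ethernet header gives 1514, and the extra 4 bytes up to 1518 are accepted only when the frame really carries an 802.1Q tag. A self-contained illustration of the check:

    #include <stdbool.h>
    #include <stdio.h>

    #define ETH_HLEN  14
    #define VLAN_HLEN  4

    static bool frame_len_ok(int len, int mtu, int hard_header_len,
                             bool is_8021q)
    {
        if (len <= mtu + hard_header_len)
            return true;                    /* ordinary frame */
        /* Over-sized frames get the 4 extra bytes only if the packet
         * in hand is actually VLAN-tagged.
         */
        return len <= mtu + hard_header_len + VLAN_HLEN && is_8021q;
    }

    int main(void)
    {
        printf("%d\n", frame_len_ok(1514, 1500, ETH_HLEN, false)); /* 1 */
        printf("%d\n", frame_len_ok(1518, 1500, ETH_HLEN, false)); /* 0 */
        printf("%d\n", frame_len_ok(1518, 1500, ETH_HLEN, true));  /* 1 */
        return 0;
    }

Moving skb_probe_transport_header() to the end of tpacket_fill_skb() also means the probe runs on a complete frame regardless of whether direct transmit is used.
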
3569 +diff --git a/net/rds/connection.c b/net/rds/connection.c
3570 +index 49adeef8090c..9b2de5e67d79 100644
3571 +--- a/net/rds/connection.c
3572 ++++ b/net/rds/connection.c
3573 +@@ -190,12 +190,6 @@ new_conn:
3574 + }
3575 + }
3576 +
3577 +- if (trans == NULL) {
3578 +- kmem_cache_free(rds_conn_slab, conn);
3579 +- conn = ERR_PTR(-ENODEV);
3580 +- goto out;
3581 +- }
3582 +-
3583 + conn->c_trans = trans;
3584 +
3585 + ret = trans->conn_alloc(conn, gfp);
3586 +diff --git a/net/rds/send.c b/net/rds/send.c
3587 +index 4df61a515b83..859de6f32521 100644
3588 +--- a/net/rds/send.c
3589 ++++ b/net/rds/send.c
3590 +@@ -1009,11 +1009,13 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
3591 + release_sock(sk);
3592 + }
3593 +
3594 +- /* racing with another thread binding seems ok here */
3595 ++ lock_sock(sk);
3596 + if (daddr == 0 || rs->rs_bound_addr == 0) {
3597 ++ release_sock(sk);
3598 + ret = -ENOTCONN; /* XXX not a great errno */
3599 + goto out;
3600 + }
3601 ++ release_sock(sk);
3602 +
3603 + if (payload_len > rds_sk_sndbuf(rs)) {
3604 + ret = -EMSGSIZE;
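
The rds_sendmsg() change retires the "racing with another thread binding seems ok here" assumption: the bound-address test is now performed under the socket lock, so a concurrent bind() can no longer slip between the two reads. The shape of the fix, as a generic sketch:

    #include <pthread.h>

    struct rs_sock {
        unsigned int bound_addr;           /* 0 until bind() completes */
        pthread_mutex_t lock;
    };

    static int check_connected(struct rs_sock *rs, unsigned int daddr)
    {
        int ret = 0;

        /* Hold the lock across the whole test so both fields are read
         * against a consistent snapshot (kernel: lock_sock/release_sock).
         */
        pthread_mutex_lock(&rs->lock);
        if (daddr == 0 || rs->bound_addr == 0)
            ret = -1;                      /* kernel returns -ENOTCONN */
        pthread_mutex_unlock(&rs->lock);
        return ret;
    }
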
3605 +diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
3606 +index f43c8f33f09e..7ec667dd4ce1 100644
3607 +--- a/net/sched/sch_api.c
3608 ++++ b/net/sched/sch_api.c
3609 +@@ -253,7 +253,8 @@ int qdisc_set_default(const char *name)
3610 + }
3611 +
3612 + /* We know handle. Find qdisc among all qdisc's attached to device
3613 +- (root qdisc, all its children, children of children etc.)
3614 ++ * (root qdisc, all its children, children of children etc.)
3615 ++ * Note: caller either uses rtnl or rcu_read_lock()
3616 + */
3617 +
3618 + static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
3619 +@@ -264,7 +265,7 @@ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
3620 + root->handle == handle)
3621 + return root;
3622 +
3623 +- list_for_each_entry(q, &root->list, list) {
3624 ++ list_for_each_entry_rcu(q, &root->list, list) {
3625 + if (q->handle == handle)
3626 + return q;
3627 + }
3628 +@@ -277,15 +278,18 @@ void qdisc_list_add(struct Qdisc *q)
3629 + struct Qdisc *root = qdisc_dev(q)->qdisc;
3630 +
3631 + WARN_ON_ONCE(root == &noop_qdisc);
3632 +- list_add_tail(&q->list, &root->list);
3633 ++ ASSERT_RTNL();
3634 ++ list_add_tail_rcu(&q->list, &root->list);
3635 + }
3636 + }
3637 + EXPORT_SYMBOL(qdisc_list_add);
3638 +
3639 + void qdisc_list_del(struct Qdisc *q)
3640 + {
3641 +- if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS))
3642 +- list_del(&q->list);
3643 ++ if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
3644 ++ ASSERT_RTNL();
3645 ++ list_del_rcu(&q->list);
3646 ++ }
3647 + }
3648 + EXPORT_SYMBOL(qdisc_list_del);
3649 +
3650 +@@ -750,14 +754,18 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
3651 + if (n == 0)
3652 + return;
3653 + drops = max_t(int, n, 0);
3654 ++ rcu_read_lock();
3655 + while ((parentid = sch->parent)) {
3656 + if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
3657 +- return;
3658 ++ break;
3659 +
3660 ++ if (sch->flags & TCQ_F_NOPARENT)
3661 ++ break;
3662 ++ /* TODO: perform the search on a per txq basis */
3663 + sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
3664 + if (sch == NULL) {
3665 +- WARN_ON(parentid != TC_H_ROOT);
3666 +- return;
3667 ++ WARN_ON_ONCE(parentid != TC_H_ROOT);
3668 ++ break;
3669 + }
3670 + cops = sch->ops->cl_ops;
3671 + if (cops->qlen_notify) {
3672 +@@ -768,6 +776,7 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
3673 + sch->q.qlen -= n;
3674 + __qdisc_qstats_drop(sch, drops);
3675 + }
3676 ++ rcu_read_unlock();
3677 + }
3678 + EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
3679 +
3680 +@@ -941,7 +950,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
3681 + }
3682 + lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
3683 + if (!netif_is_multiqueue(dev))
3684 +- sch->flags |= TCQ_F_ONETXQUEUE;
3685 ++ sch->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
3686 + }
3687 +
3688 + sch->handle = handle;
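
The sch_api.c changes make the per-device qdisc list safe to walk without the RTNL: writers keep serializing on the RTNL (now documented by ASSERT_RTNL()) but switch to list_add_tail_rcu()/list_del_rcu(), and qdisc_tree_decrease_qlen() walks toward the root inside rcu_read_lock(). The new TCQ_F_NOPARENT flag, set on singleton qdiscs attached directly to a tx queue here and in sch_generic/mq/mqprio below, lets that walk stop cleanly instead of WARNing when there is no parent to notify. A plain-C11 analogue of the publish/traverse pattern (the kernel additionally defers frees across an RCU grace period):

    #include <stdatomic.h>
    #include <stddef.h>

    struct qd {
        unsigned int handle;
        _Atomic(struct qd *) next;
    };

    static _Atomic(struct qd *) qd_list;

    /* Writer, externally serialized (like the RTNL): release-publish so
     * readers never observe a half-initialized node.
     */
    static void qd_add(struct qd *q)
    {
        q->next = atomic_load_explicit(&qd_list, memory_order_relaxed);
        atomic_store_explicit(&qd_list, q, memory_order_release);
    }

    /* Lockless reader, analogous to list_for_each_entry_rcu(): safe
     * against concurrent qd_add(); removed nodes must stay allocated
     * until all readers are done.
     */
    static struct qd *qd_lookup(unsigned int handle)
    {
        struct qd *q = atomic_load_explicit(&qd_list, memory_order_acquire);

        for (; q; q = atomic_load_explicit(&q->next, memory_order_acquire))
            if (q->handle == handle)
                return q;
        return NULL;
    }
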
3689 +diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
3690 +index cb5d4ad32946..e82a1ad80aa5 100644
3691 +--- a/net/sched/sch_generic.c
3692 ++++ b/net/sched/sch_generic.c
3693 +@@ -737,7 +737,7 @@ static void attach_one_default_qdisc(struct net_device *dev,
3694 + return;
3695 + }
3696 + if (!netif_is_multiqueue(dev))
3697 +- qdisc->flags |= TCQ_F_ONETXQUEUE;
3698 ++ qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
3699 + dev_queue->qdisc_sleeping = qdisc;
3700 + }
3701 +
3702 +diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
3703 +index f3cbaecd283a..3e82f047caaf 100644
3704 +--- a/net/sched/sch_mq.c
3705 ++++ b/net/sched/sch_mq.c
3706 +@@ -63,7 +63,7 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt)
3707 + if (qdisc == NULL)
3708 + goto err;
3709 + priv->qdiscs[ntx] = qdisc;
3710 +- qdisc->flags |= TCQ_F_ONETXQUEUE;
3711 ++ qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
3712 + }
3713 +
3714 + sch->flags |= TCQ_F_MQROOT;
3715 +@@ -156,7 +156,7 @@ static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
3716 +
3717 + *old = dev_graft_qdisc(dev_queue, new);
3718 + if (new)
3719 +- new->flags |= TCQ_F_ONETXQUEUE;
3720 ++ new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
3721 + if (dev->flags & IFF_UP)
3722 + dev_activate(dev);
3723 + return 0;
3724 +diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
3725 +index 3811a745452c..ad70ecf57ce7 100644
3726 +--- a/net/sched/sch_mqprio.c
3727 ++++ b/net/sched/sch_mqprio.c
3728 +@@ -132,7 +132,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
3729 + goto err;
3730 + }
3731 + priv->qdiscs[i] = qdisc;
3732 +- qdisc->flags |= TCQ_F_ONETXQUEUE;
3733 ++ qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
3734 + }
3735 +
3736 + /* If the mqprio options indicate that hardware should own
3737 +@@ -209,7 +209,7 @@ static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
3738 + *old = dev_graft_qdisc(dev_queue, new);
3739 +
3740 + if (new)
3741 +- new->flags |= TCQ_F_ONETXQUEUE;
3742 ++ new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
3743 +
3744 + if (dev->flags & IFF_UP)
3745 + dev_activate(dev);
3746 +diff --git a/net/sctp/auth.c b/net/sctp/auth.c
3747 +index 4f15b7d730e1..1543e39f47c3 100644
3748 +--- a/net/sctp/auth.c
3749 ++++ b/net/sctp/auth.c
3750 +@@ -809,8 +809,8 @@ int sctp_auth_ep_set_hmacs(struct sctp_endpoint *ep,
3751 + if (!has_sha1)
3752 + return -EINVAL;
3753 +
3754 +- memcpy(ep->auth_hmacs_list->hmac_ids, &hmacs->shmac_idents[0],
3755 +- hmacs->shmac_num_idents * sizeof(__u16));
3756 ++ for (i = 0; i < hmacs->shmac_num_idents; i++)
3757 ++ ep->auth_hmacs_list->hmac_ids[i] = htons(hmacs->shmac_idents[i]);
3758 + ep->auth_hmacs_list->param_hdr.length = htons(sizeof(sctp_paramhdr_t) +
3759 + hmacs->shmac_num_idents * sizeof(__u16));
3760 + return 0;
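
The sctp_auth_ep_set_hmacs() fix is a byte-order bug: hmac_ids is sent on the wire inside the AUTH parameter, and every neighbouring field (including the length set two lines later) is big-endian, but the old memcpy() stored the caller's identifiers in host order. Converting element by element with htons() restores interoperability on little-endian hosts. A tiny demonstration:

    #include <arpa/inet.h>   /* htons() */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t idents[3] = { 1, 3, 2 };   /* host order; SHA-1 is id 1 */
        uint16_t wire[3];
        int i;

        /* A raw memcpy() would leave these little-endian on x86; per-
         * element htons() yields the big-endian wire representation.
         */
        for (i = 0; i < 3; i++)
            wire[i] = htons(idents[i]);

        printf("0x%04x\n", wire[0]);  /* 0x0100 on a little-endian host */
        return 0;
    }
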
3761 +diff --git a/net/sctp/socket.c b/net/sctp/socket.c
3762 +index 17bef01b9aa3..3ec88be0faec 100644
3763 +--- a/net/sctp/socket.c
3764 ++++ b/net/sctp/socket.c
3765 +@@ -7375,6 +7375,13 @@ struct proto sctp_prot = {
3766 +
3767 + #if IS_ENABLED(CONFIG_IPV6)
3768 +
3769 ++#include <net/transp_v6.h>
3770 ++static void sctp_v6_destroy_sock(struct sock *sk)
3771 ++{
3772 ++ sctp_destroy_sock(sk);
3773 ++ inet6_destroy_sock(sk);
3774 ++}
3775 ++
3776 + struct proto sctpv6_prot = {
3777 + .name = "SCTPv6",
3778 + .owner = THIS_MODULE,
3779 +@@ -7384,7 +7391,7 @@ struct proto sctpv6_prot = {
3780 + .accept = sctp_accept,
3781 + .ioctl = sctp_ioctl,
3782 + .init = sctp_init_sock,
3783 +- .destroy = sctp_destroy_sock,
3784 ++ .destroy = sctp_v6_destroy_sock,
3785 + .shutdown = sctp_shutdown,
3786 + .setsockopt = sctp_setsockopt,
3787 + .getsockopt = sctp_getsockopt,
3788 +diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
3789 +index cd7c5f131e72..86f2e7c44694 100644
3790 +--- a/net/tipc/udp_media.c
3791 ++++ b/net/tipc/udp_media.c
3792 +@@ -159,8 +159,11 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
3793 + struct sk_buff *clone;
3794 + struct rtable *rt;
3795 +
3796 +- if (skb_headroom(skb) < UDP_MIN_HEADROOM)
3797 +- pskb_expand_head(skb, UDP_MIN_HEADROOM, 0, GFP_ATOMIC);
3798 ++ if (skb_headroom(skb) < UDP_MIN_HEADROOM) {
3799 ++ err = pskb_expand_head(skb, UDP_MIN_HEADROOM, 0, GFP_ATOMIC);
3800 ++ if (err)
3801 ++ goto tx_error;
3802 ++ }
3803 +
3804 + clone = skb_clone(skb, GFP_ATOMIC);
3805 + skb_set_inner_protocol(clone, htons(ETH_P_TIPC));
3806 +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
3807 +index 94f658235fb4..128b0982c96b 100644
3808 +--- a/net/unix/af_unix.c
3809 ++++ b/net/unix/af_unix.c
3810 +@@ -326,6 +326,118 @@ found:
3811 + return s;
3812 + }
3813 +
3814 ++/* Support code for asymmetrically connected dgram sockets
3815 ++ *
3816 ++ * If a datagram socket is connected to a socket not itself connected
3817 ++ * to the first socket (eg, /dev/log), clients may only enqueue more
3818 ++ * messages if the present receive queue of the server socket is not
3819 ++ * "too large". This means there's a second writeability condition
3820 ++ * poll and sendmsg need to test. The dgram recv code will do a wake
3821 ++ * up on the peer_wait wait queue of a socket upon reception of a
3822 ++ * datagram which needs to be propagated to sleeping would-be writers
3823 ++ * since these might not have sent anything so far. This can't be
3824 ++ * accomplished via poll_wait because the lifetime of the server
3825 ++ * socket might be less than that of its clients if these break their
3826 ++ * association with it or if the server socket is closed while clients
3827 ++ * are still connected to it and there's no way to inform "a polling
3828 ++ * implementation" that it should let go of a certain wait queue
3829 ++ *
3830 ++ * In order to propagate a wake up, a wait_queue_t of the client
3831 ++ * socket is enqueued on the peer_wait queue of the server socket
3832 ++ * whose wake function does a wake_up on the ordinary client socket
3833 ++ * wait queue. This connection is established whenever a write (or
3834 ++ * poll for write) hit the flow control condition and broken when the
3835 ++ * association to the server socket is dissolved or after a wake up
3836 ++ * was relayed.
3837 ++ */
3838 ++
3839 ++static int unix_dgram_peer_wake_relay(wait_queue_t *q, unsigned mode, int flags,
3840 ++ void *key)
3841 ++{
3842 ++ struct unix_sock *u;
3843 ++ wait_queue_head_t *u_sleep;
3844 ++
3845 ++ u = container_of(q, struct unix_sock, peer_wake);
3846 ++
3847 ++ __remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
3848 ++ q);
3849 ++ u->peer_wake.private = NULL;
3850 ++
3851 ++ /* relaying can only happen while the wq still exists */
3852 ++ u_sleep = sk_sleep(&u->sk);
3853 ++ if (u_sleep)
3854 ++ wake_up_interruptible_poll(u_sleep, key);
3855 ++
3856 ++ return 0;
3857 ++}
3858 ++
3859 ++static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
3860 ++{
3861 ++ struct unix_sock *u, *u_other;
3862 ++ int rc;
3863 ++
3864 ++ u = unix_sk(sk);
3865 ++ u_other = unix_sk(other);
3866 ++ rc = 0;
3867 ++ spin_lock(&u_other->peer_wait.lock);
3868 ++
3869 ++ if (!u->peer_wake.private) {
3870 ++ u->peer_wake.private = other;
3871 ++ __add_wait_queue(&u_other->peer_wait, &u->peer_wake);
3872 ++
3873 ++ rc = 1;
3874 ++ }
3875 ++
3876 ++ spin_unlock(&u_other->peer_wait.lock);
3877 ++ return rc;
3878 ++}
3879 ++
3880 ++static void unix_dgram_peer_wake_disconnect(struct sock *sk,
3881 ++ struct sock *other)
3882 ++{
3883 ++ struct unix_sock *u, *u_other;
3884 ++
3885 ++ u = unix_sk(sk);
3886 ++ u_other = unix_sk(other);
3887 ++ spin_lock(&u_other->peer_wait.lock);
3888 ++
3889 ++ if (u->peer_wake.private == other) {
3890 ++ __remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
3891 ++ u->peer_wake.private = NULL;
3892 ++ }
3893 ++
3894 ++ spin_unlock(&u_other->peer_wait.lock);
3895 ++}
3896 ++
3897 ++static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
3898 ++ struct sock *other)
3899 ++{
3900 ++ unix_dgram_peer_wake_disconnect(sk, other);
3901 ++ wake_up_interruptible_poll(sk_sleep(sk),
3902 ++ POLLOUT |
3903 ++ POLLWRNORM |
3904 ++ POLLWRBAND);
3905 ++}
3906 ++
3907 ++/* preconditions:
3908 ++ * - unix_peer(sk) == other
3909 ++ * - association is stable
3910 ++ */
3911 ++static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
3912 ++{
3913 ++ int connected;
3914 ++
3915 ++ connected = unix_dgram_peer_wake_connect(sk, other);
3916 ++
3917 ++ if (unix_recvq_full(other))
3918 ++ return 1;
3919 ++
3920 ++ if (connected)
3921 ++ unix_dgram_peer_wake_disconnect(sk, other);
3922 ++
3923 ++ return 0;
3924 ++}
3925 ++
3926 + static inline int unix_writable(struct sock *sk)
3927 + {
3928 + return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
3929 +@@ -430,6 +542,8 @@ static void unix_release_sock(struct sock *sk, int embrion)
3930 + skpair->sk_state_change(skpair);
3931 + sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
3932 + }
3933 ++
3934 ++ unix_dgram_peer_wake_disconnect(sk, skpair);
3935 + sock_put(skpair); /* It may now die */
3936 + unix_peer(sk) = NULL;
3937 + }
3938 +@@ -440,6 +554,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
3939 + if (state == TCP_LISTEN)
3940 + unix_release_sock(skb->sk, 1);
3941 + /* passed fds are erased in the kfree_skb hook */
3942 ++ UNIXCB(skb).consumed = skb->len;
3943 + kfree_skb(skb);
3944 + }
3945 +
3946 +@@ -664,6 +779,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
3947 + INIT_LIST_HEAD(&u->link);
3948 + mutex_init(&u->readlock); /* single task reading lock */
3949 + init_waitqueue_head(&u->peer_wait);
3950 ++ init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
3951 + unix_insert_socket(unix_sockets_unbound(sk), sk);
3952 + out:
3953 + if (sk == NULL)
3954 +@@ -1031,6 +1147,8 @@ restart:
3955 + if (unix_peer(sk)) {
3956 + struct sock *old_peer = unix_peer(sk);
3957 + unix_peer(sk) = other;
3958 ++ unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
3959 ++
3960 + unix_state_double_unlock(sk, other);
3961 +
3962 + if (other != old_peer)
3963 +@@ -1432,6 +1550,14 @@ static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool sen
3964 + return err;
3965 + }
3966 +
3967 ++static bool unix_passcred_enabled(const struct socket *sock,
3968 ++ const struct sock *other)
3969 ++{
3970 ++ return test_bit(SOCK_PASSCRED, &sock->flags) ||
3971 ++ !other->sk_socket ||
3972 ++ test_bit(SOCK_PASSCRED, &other->sk_socket->flags);
3973 ++}
3974 ++
3975 + /*
3976 + * Some apps rely on write() giving SCM_CREDENTIALS
3977 + * We include credentials if source or destination socket
3978 +@@ -1442,14 +1568,41 @@ static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
3979 + {
3980 + if (UNIXCB(skb).pid)
3981 + return;
3982 +- if (test_bit(SOCK_PASSCRED, &sock->flags) ||
3983 +- !other->sk_socket ||
3984 +- test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) {
3985 ++ if (unix_passcred_enabled(sock, other)) {
3986 + UNIXCB(skb).pid = get_pid(task_tgid(current));
3987 + current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
3988 + }
3989 + }
3990 +
3991 ++static int maybe_init_creds(struct scm_cookie *scm,
3992 ++ struct socket *socket,
3993 ++ const struct sock *other)
3994 ++{
3995 ++ int err;
3996 ++ struct msghdr msg = { .msg_controllen = 0 };
3997 ++
3998 ++ err = scm_send(socket, &msg, scm, false);
3999 ++ if (err)
4000 ++ return err;
4001 ++
4002 ++ if (unix_passcred_enabled(socket, other)) {
4003 ++ scm->pid = get_pid(task_tgid(current));
4004 ++ current_uid_gid(&scm->creds.uid, &scm->creds.gid);
4005 ++ }
4006 ++ return err;
4007 ++}
4008 ++
4009 ++static bool unix_skb_scm_eq(struct sk_buff *skb,
4010 ++ struct scm_cookie *scm)
4011 ++{
4012 ++ const struct unix_skb_parms *u = &UNIXCB(skb);
4013 ++
4014 ++ return u->pid == scm->pid &&
4015 ++ uid_eq(u->uid, scm->creds.uid) &&
4016 ++ gid_eq(u->gid, scm->creds.gid) &&
4017 ++ unix_secdata_eq(scm, skb);
4018 ++}
4019 ++
4020 + /*
4021 + * Send AF_UNIX data.
4022 + */
4023 +@@ -1470,6 +1623,7 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
4024 + struct scm_cookie scm;
4025 + int max_level;
4026 + int data_len = 0;
4027 ++ int sk_locked;
4028 +
4029 + wait_for_unix_gc();
4030 + err = scm_send(sock, msg, &scm, false);
4031 +@@ -1548,12 +1702,14 @@ restart:
4032 + goto out_free;
4033 + }
4034 +
4035 ++ sk_locked = 0;
4036 + unix_state_lock(other);
4037 ++restart_locked:
4038 + err = -EPERM;
4039 + if (!unix_may_send(sk, other))
4040 + goto out_unlock;
4041 +
4042 +- if (sock_flag(other, SOCK_DEAD)) {
4043 ++ if (unlikely(sock_flag(other, SOCK_DEAD))) {
4044 + /*
4045 + * Check with 1003.1g - what should
4046 + * datagram error
4047 +@@ -1561,10 +1717,14 @@ restart:
4048 + unix_state_unlock(other);
4049 + sock_put(other);
4050 +
4051 ++ if (!sk_locked)
4052 ++ unix_state_lock(sk);
4053 ++
4054 + err = 0;
4055 +- unix_state_lock(sk);
4056 + if (unix_peer(sk) == other) {
4057 + unix_peer(sk) = NULL;
4058 ++ unix_dgram_peer_wake_disconnect_wakeup(sk, other);
4059 ++
4060 + unix_state_unlock(sk);
4061 +
4062 + unix_dgram_disconnected(sk, other);
4063 +@@ -1590,21 +1750,38 @@ restart:
4064 + goto out_unlock;
4065 + }
4066 +
4067 +- if (unix_peer(other) != sk && unix_recvq_full(other)) {
4068 +- if (!timeo) {
4069 +- err = -EAGAIN;
4070 +- goto out_unlock;
4071 ++ if (unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
4072 ++ if (timeo) {
4073 ++ timeo = unix_wait_for_peer(other, timeo);
4074 ++
4075 ++ err = sock_intr_errno(timeo);
4076 ++ if (signal_pending(current))
4077 ++ goto out_free;
4078 ++
4079 ++ goto restart;
4080 + }
4081 +
4082 +- timeo = unix_wait_for_peer(other, timeo);
4083 ++ if (!sk_locked) {
4084 ++ unix_state_unlock(other);
4085 ++ unix_state_double_lock(sk, other);
4086 ++ }
4087 +
4088 +- err = sock_intr_errno(timeo);
4089 +- if (signal_pending(current))
4090 +- goto out_free;
4091 ++ if (unix_peer(sk) != other ||
4092 ++ unix_dgram_peer_wake_me(sk, other)) {
4093 ++ err = -EAGAIN;
4094 ++ sk_locked = 1;
4095 ++ goto out_unlock;
4096 ++ }
4097 +
4098 +- goto restart;
4099 ++ if (!sk_locked) {
4100 ++ sk_locked = 1;
4101 ++ goto restart_locked;
4102 ++ }
4103 + }
4104 +
4105 ++ if (unlikely(sk_locked))
4106 ++ unix_state_unlock(sk);
4107 ++
4108 + if (sock_flag(other, SOCK_RCVTSTAMP))
4109 + __net_timestamp(skb);
4110 + maybe_add_creds(skb, sock, other);
4111 +@@ -1618,6 +1795,8 @@ restart:
4112 + return len;
4113 +
4114 + out_unlock:
4115 ++ if (sk_locked)
4116 ++ unix_state_unlock(sk);
4117 + unix_state_unlock(other);
4118 + out_free:
4119 + kfree_skb(skb);
4120 +@@ -1739,8 +1918,10 @@ out_err:
4121 + static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
4122 + int offset, size_t size, int flags)
4123 + {
4124 +- int err = 0;
4125 +- bool send_sigpipe = true;
4126 ++ int err;
4127 ++ bool send_sigpipe = false;
4128 ++ bool init_scm = true;
4129 ++ struct scm_cookie scm;
4130 + struct sock *other, *sk = socket->sk;
4131 + struct sk_buff *skb, *newskb = NULL, *tail = NULL;
4132 +
4133 +@@ -1758,7 +1939,7 @@ alloc_skb:
4134 + newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
4135 + &err, 0);
4136 + if (!newskb)
4137 +- return err;
4138 ++ goto err;
4139 + }
4140 +
4141 + /* we must acquire readlock as we modify already present
4142 +@@ -1767,12 +1948,12 @@ alloc_skb:
4143 + err = mutex_lock_interruptible(&unix_sk(other)->readlock);
4144 + if (err) {
4145 + err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS;
4146 +- send_sigpipe = false;
4147 + goto err;
4148 + }
4149 +
4150 + if (sk->sk_shutdown & SEND_SHUTDOWN) {
4151 + err = -EPIPE;
4152 ++ send_sigpipe = true;
4153 + goto err_unlock;
4154 + }
4155 +
4156 +@@ -1781,23 +1962,34 @@ alloc_skb:
4157 + if (sock_flag(other, SOCK_DEAD) ||
4158 + other->sk_shutdown & RCV_SHUTDOWN) {
4159 + err = -EPIPE;
4160 ++ send_sigpipe = true;
4161 + goto err_state_unlock;
4162 + }
4163 +
4164 ++ if (init_scm) {
4165 ++ err = maybe_init_creds(&scm, socket, other);
4166 ++ if (err)
4167 ++ goto err_state_unlock;
4168 ++ init_scm = false;
4169 ++ }
4170 ++
4171 + skb = skb_peek_tail(&other->sk_receive_queue);
4172 + if (tail && tail == skb) {
4173 + skb = newskb;
4174 +- } else if (!skb) {
4175 +- if (newskb)
4176 ++ } else if (!skb || !unix_skb_scm_eq(skb, &scm)) {
4177 ++ if (newskb) {
4178 + skb = newskb;
4179 +- else
4180 ++ } else {
4181 ++ tail = skb;
4182 + goto alloc_skb;
4183 ++ }
4184 + } else if (newskb) {
4185 + /* this is fast path, we don't necessarily need to
4186 + * call to kfree_skb even though with newskb == NULL
4187 + * this - does no harm
4188 + */
4189 + consume_skb(newskb);
4190 ++ newskb = NULL;
4191 + }
4192 +
4193 + if (skb_append_pagefrags(skb, page, offset, size)) {
4194 +@@ -1810,14 +2002,20 @@ alloc_skb:
4195 + skb->truesize += size;
4196 + atomic_add(size, &sk->sk_wmem_alloc);
4197 +
4198 +- if (newskb)
4199 ++ if (newskb) {
4200 ++ err = unix_scm_to_skb(&scm, skb, false);
4201 ++ if (err)
4202 ++ goto err_state_unlock;
4203 ++ spin_lock(&other->sk_receive_queue.lock);
4204 + __skb_queue_tail(&other->sk_receive_queue, newskb);
4205 ++ spin_unlock(&other->sk_receive_queue.lock);
4206 ++ }
4207 +
4208 + unix_state_unlock(other);
4209 + mutex_unlock(&unix_sk(other)->readlock);
4210 +
4211 + other->sk_data_ready(other);
4212 +-
4213 ++ scm_destroy(&scm);
4214 + return size;
4215 +
4216 + err_state_unlock:
4217 +@@ -1828,6 +2026,8 @@ err:
4218 + kfree_skb(newskb);
4219 + if (send_sigpipe && !(flags & MSG_NOSIGNAL))
4220 + send_sig(SIGPIPE, current, 0);
4221 ++ if (!init_scm)
4222 ++ scm_destroy(&scm);
4223 + return err;
4224 + }
4225 +
4226 +@@ -2071,6 +2271,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state)
4227 +
4228 + do {
4229 + int chunk;
4230 ++ bool drop_skb;
4231 + struct sk_buff *skb, *last;
4232 +
4233 + unix_state_lock(sk);
4234 +@@ -2130,10 +2331,7 @@ unlock:
4235 +
4236 + if (check_creds) {
4237 + /* Never glue messages from different writers */
4238 +- if ((UNIXCB(skb).pid != scm.pid) ||
4239 +- !uid_eq(UNIXCB(skb).uid, scm.creds.uid) ||
4240 +- !gid_eq(UNIXCB(skb).gid, scm.creds.gid) ||
4241 +- !unix_secdata_eq(&scm, skb))
4242 ++ if (!unix_skb_scm_eq(skb, &scm))
4243 + break;
4244 + } else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
4245 + /* Copy credentials */
4246 +@@ -2151,7 +2349,11 @@ unlock:
4247 + }
4248 +
4249 + chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
4250 ++ skb_get(skb);
4251 + chunk = state->recv_actor(skb, skip, chunk, state);
4252 ++ drop_skb = !unix_skb_len(skb);
4253 ++ /* skb is only safe to use if !drop_skb */
4254 ++ consume_skb(skb);
4255 + if (chunk < 0) {
4256 + if (copied == 0)
4257 + copied = -EFAULT;
4258 +@@ -2160,6 +2362,18 @@ unlock:
4259 + copied += chunk;
4260 + size -= chunk;
4261 +
4262 ++ if (drop_skb) {
4263 ++ /* the skb was touched by a concurrent reader;
4264 ++ * we should not expect anything from this skb
4265 ++ * anymore and assume it invalid - we can be
4266 ++ * sure it was dropped from the socket queue
4267 ++ *
4268 ++ * let's report a short read
4269 ++ */
4270 ++ err = 0;
4271 ++ break;
4272 ++ }
4273 ++
4274 + /* Mark read part of skb as used */
4275 + if (!(flags & MSG_PEEK)) {
4276 + UNIXCB(skb).consumed += chunk;
4277 +@@ -2453,14 +2667,16 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
4278 + return mask;
4279 +
4280 + writable = unix_writable(sk);
4281 +- other = unix_peer_get(sk);
4282 +- if (other) {
4283 +- if (unix_peer(other) != sk) {
4284 +- sock_poll_wait(file, &unix_sk(other)->peer_wait, wait);
4285 +- if (unix_recvq_full(other))
4286 +- writable = 0;
4287 +- }
4288 +- sock_put(other);
4289 ++ if (writable) {
4290 ++ unix_state_lock(sk);
4291 ++
4292 ++ other = unix_peer(sk);
4293 ++ if (other && unix_peer(other) != sk &&
4294 ++ unix_recvq_full(other) &&
4295 ++ unix_dgram_peer_wake_me(sk, other))
4296 ++ writable = 0;
4297 ++
4298 ++ unix_state_unlock(sk);
4299 + }
4300 +
4301 + if (writable)
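
The af_unix block is the largest fix in this release. As the new comment explains, a datagram socket connected to a peer that is not connected back (the /dev/log pattern) is flow-controlled by the peer's receive queue, but poll() previously registered directly on the peer's peer_wait queue: that breaks when the peer's lifetime is shorter than the waiter's, and it never wakes writers sleeping in sendmsg(). The new per-client peer_wake entry is enqueued on the server's peer_wait queue, its wake function forwards the event to the client's own wait queue, and the association is dissolved on disconnect or after one relay. The user-visible contract can be checked from userspace; a hedged sketch (error handling elided, output as expected on a kernel carrying this fix):

    #include <fcntl.h>
    #include <poll.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <sys/un.h>
    #include <unistd.h>

    int main(void)
    {
        struct sockaddr_un a = { .sun_family = AF_UNIX };
        socklen_t alen = offsetof(struct sockaddr_un, sun_path) + 8;
        struct pollfd p;
        char buf[64] = "x";
        int srv = socket(AF_UNIX, SOCK_DGRAM, 0);
        int cli = socket(AF_UNIX, SOCK_DGRAM, 0);

        memcpy(a.sun_path, "\0pw-demo", 8);        /* abstract namespace */
        bind(srv, (struct sockaddr *)&a, alen);
        connect(cli, (struct sockaddr *)&a, alen); /* srv never connects back */
        fcntl(cli, F_SETFL, O_NONBLOCK);

        while (send(cli, buf, sizeof(buf), 0) > 0)
            ;                  /* fill the peer's receive queue: EAGAIN */

        p.fd = cli;
        p.events = POLLOUT;
        printf("full:    %d\n", poll(&p, 1, 100));  /* 0, not writable */

        recv(srv, buf, sizeof(buf), 0);             /* drain one datagram */
        printf("drained: %d\n", poll(&p, 1, 100));  /* 1, wakeup relayed */
        return 0;
    }

The expected output is 0 for the first poll() and 1 for the second; the forwarding between the two wait queues is done by unix_dgram_peer_wake_relay(), armed by unix_dgram_peer_wake_me() from both the poll and sendmsg paths.
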
4302 +diff --git a/sound/pci/Kconfig b/sound/pci/Kconfig
4303 +index edfc1b8d553e..656ce39bddbc 100644
4304 +--- a/sound/pci/Kconfig
4305 ++++ b/sound/pci/Kconfig
4306 +@@ -25,7 +25,7 @@ config SND_ALS300
4307 + select SND_PCM
4308 + select SND_AC97_CODEC
4309 + select SND_OPL3_LIB
4310 +- select ZONE_DMA
4311 ++ depends on ZONE_DMA
4312 + help
4313 + Say 'Y' or 'M' to include support for Avance Logic ALS300/ALS300+
4314 +
4315 +@@ -50,7 +50,7 @@ config SND_ALI5451
4316 + tristate "ALi M5451 PCI Audio Controller"
4317 + select SND_MPU401_UART
4318 + select SND_AC97_CODEC
4319 +- select ZONE_DMA
4320 ++ depends on ZONE_DMA
4321 + help
4322 + Say Y here to include support for the integrated AC97 sound
4323 + device on motherboards using the ALi M5451 Audio Controller
4324 +@@ -155,7 +155,7 @@ config SND_AZT3328
4325 + select SND_PCM
4326 + select SND_RAWMIDI
4327 + select SND_AC97_CODEC
4328 +- select ZONE_DMA
4329 ++ depends on ZONE_DMA
4330 + help
4331 + Say Y here to include support for Aztech AZF3328 (PCI168)
4332 + soundcards.
4333 +@@ -463,7 +463,7 @@ config SND_EMU10K1
4334 + select SND_HWDEP
4335 + select SND_RAWMIDI
4336 + select SND_AC97_CODEC
4337 +- select ZONE_DMA
4338 ++ depends on ZONE_DMA
4339 + help
4340 + Say Y to include support for Sound Blaster PCI 512, Live!,
4341 + Audigy and E-mu APS (partially supported) soundcards.
4342 +@@ -479,7 +479,7 @@ config SND_EMU10K1X
4343 + tristate "Emu10k1X (Dell OEM Version)"
4344 + select SND_AC97_CODEC
4345 + select SND_RAWMIDI
4346 +- select ZONE_DMA
4347 ++ depends on ZONE_DMA
4348 + help
4349 + Say Y here to include support for the Dell OEM version of the
4350 + Sound Blaster Live!.
4351 +@@ -513,7 +513,7 @@ config SND_ES1938
4352 + select SND_OPL3_LIB
4353 + select SND_MPU401_UART
4354 + select SND_AC97_CODEC
4355 +- select ZONE_DMA
4356 ++ depends on ZONE_DMA
4357 + help
4358 + Say Y here to include support for soundcards based on ESS Solo-1
4359 + (ES1938, ES1946, ES1969) chips.
4360 +@@ -525,7 +525,7 @@ config SND_ES1968
4361 + tristate "ESS ES1968/1978 (Maestro-1/2/2E)"
4362 + select SND_MPU401_UART
4363 + select SND_AC97_CODEC
4364 +- select ZONE_DMA
4365 ++ depends on ZONE_DMA
4366 + help
4367 + Say Y here to include support for soundcards based on ESS Maestro
4368 + 1/2/2E chips.
4369 +@@ -612,7 +612,7 @@ config SND_ICE1712
4370 + select SND_MPU401_UART
4371 + select SND_AC97_CODEC
4372 + select BITREVERSE
4373 +- select ZONE_DMA
4374 ++ depends on ZONE_DMA
4375 + help
4376 + Say Y here to include support for soundcards based on the
4377 + ICE1712 (Envy24) chip.
4378 +@@ -700,7 +700,7 @@ config SND_LX6464ES
4379 + config SND_MAESTRO3
4380 + tristate "ESS Allegro/Maestro3"
4381 + select SND_AC97_CODEC
4382 +- select ZONE_DMA
4383 ++ depends on ZONE_DMA
4384 + help
4385 + Say Y here to include support for soundcards based on ESS Maestro 3
4386 + (Allegro) chips.
4387 +@@ -806,7 +806,7 @@ config SND_SIS7019
4388 + tristate "SiS 7019 Audio Accelerator"
4389 + depends on X86_32
4390 + select SND_AC97_CODEC
4391 +- select ZONE_DMA
4392 ++ depends on ZONE_DMA
4393 + help
4394 + Say Y here to include support for the SiS 7019 Audio Accelerator.
4395 +
4396 +@@ -818,7 +818,7 @@ config SND_SONICVIBES
4397 + select SND_OPL3_LIB
4398 + select SND_MPU401_UART
4399 + select SND_AC97_CODEC
4400 +- select ZONE_DMA
4401 ++ depends on ZONE_DMA
4402 + help
4403 + Say Y here to include support for soundcards based on the S3
4404 + SonicVibes chip.
4405 +@@ -830,7 +830,7 @@ config SND_TRIDENT
4406 + tristate "Trident 4D-Wave DX/NX; SiS 7018"
4407 + select SND_MPU401_UART
4408 + select SND_AC97_CODEC
4409 +- select ZONE_DMA
4410 ++ depends on ZONE_DMA
4411 + help
4412 + Say Y here to include support for soundcards based on Trident
4413 + 4D-Wave DX/NX or SiS 7018 chips.
4414 +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
4415 +index acbfbe087ee8..f22f5c409447 100644
4416 +--- a/sound/pci/hda/patch_hdmi.c
4417 ++++ b/sound/pci/hda/patch_hdmi.c
4418 +@@ -50,8 +50,9 @@ MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info");
4419 + #define is_haswell(codec) ((codec)->core.vendor_id == 0x80862807)
4420 + #define is_broadwell(codec) ((codec)->core.vendor_id == 0x80862808)
4421 + #define is_skylake(codec) ((codec)->core.vendor_id == 0x80862809)
4422 ++#define is_broxton(codec) ((codec)->core.vendor_id == 0x8086280a)
4423 + #define is_haswell_plus(codec) (is_haswell(codec) || is_broadwell(codec) \
4424 +- || is_skylake(codec))
4425 ++ || is_skylake(codec) || is_broxton(codec))
4426 +
4427 + #define is_valleyview(codec) ((codec)->core.vendor_id == 0x80862882)
4428 + #define is_cherryview(codec) ((codec)->core.vendor_id == 0x80862883)
4429 +diff --git a/tools/net/Makefile b/tools/net/Makefile
4430 +index ee577ea03ba5..ddf888010652 100644
4431 +--- a/tools/net/Makefile
4432 ++++ b/tools/net/Makefile
4433 +@@ -4,6 +4,9 @@ CC = gcc
4434 + LEX = flex
4435 + YACC = bison
4436 +
4437 ++CFLAGS += -Wall -O2
4438 ++CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include
4439 ++
4440 + %.yacc.c: %.y
4441 + $(YACC) -o $@ -d $<
4442 +
4443 +@@ -12,15 +15,13 @@ YACC = bison
4444 +
4445 + all : bpf_jit_disasm bpf_dbg bpf_asm
4446 +
4447 +-bpf_jit_disasm : CFLAGS = -Wall -O2 -DPACKAGE='bpf_jit_disasm'
4448 ++bpf_jit_disasm : CFLAGS += -DPACKAGE='bpf_jit_disasm'
4449 + bpf_jit_disasm : LDLIBS = -lopcodes -lbfd -ldl
4450 + bpf_jit_disasm : bpf_jit_disasm.o
4451 +
4452 +-bpf_dbg : CFLAGS = -Wall -O2
4453 + bpf_dbg : LDLIBS = -lreadline
4454 + bpf_dbg : bpf_dbg.o
4455 +
4456 +-bpf_asm : CFLAGS = -Wall -O2 -I.
4457 + bpf_asm : LDLIBS =
4458 + bpf_asm : bpf_asm.o bpf_exp.yacc.o bpf_exp.lex.o
4459 + bpf_exp.lex.o : bpf_exp.yacc.c