Gentoo Archives: gentoo-commits

From: Alice Ferrazzi <alicef@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.15 commit in: /
Date: Mon, 12 Feb 2018 09:01:18
Message-Id: 1518426061.a0d0349612049072e45ad7d28ff2422914b4b2dd.alicef@gentoo
1 commit: a0d0349612049072e45ad7d28ff2422914b4b2dd
2 Author: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
3 AuthorDate: Mon Feb 12 09:01:01 2018 +0000
4 Commit: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
5 CommitDate: Mon Feb 12 09:01:01 2018 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=a0d0349612049072e45ad7d28ff2422914b4b2dd
7
8 Linux kernel 4.15.3
9
10 0000_README | 4 +
11 1002_linux-4.15.3.patch | 776 ++++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 780 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index db575f6..635b977 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -51,6 +51,10 @@ Patch: 1001_linux-4.15.2.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.15.2
21
22 +Patch: 1002_linux-4.15.3.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.15.3
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1002_linux-4.15.3.patch b/1002_linux-4.15.3.patch
31 new file mode 100644
32 index 0000000..7d0d7a2
33 --- /dev/null
34 +++ b/1002_linux-4.15.3.patch
35 @@ -0,0 +1,776 @@
36 +diff --git a/Makefile b/Makefile
37 +index 54f1bc10b531..13566ad7863a 100644
38 +--- a/Makefile
39 ++++ b/Makefile
40 +@@ -1,7 +1,7 @@
41 + # SPDX-License-Identifier: GPL-2.0
42 + VERSION = 4
43 + PATCHLEVEL = 15
44 +-SUBLEVEL = 2
45 ++SUBLEVEL = 3
46 + EXTRAVERSION =
47 + NAME = Fearless Coyote
48 +
49 +diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
50 +index 9267cbdb14d2..3ced1ba1fd11 100644
51 +--- a/crypto/tcrypt.c
52 ++++ b/crypto/tcrypt.c
53 +@@ -198,11 +198,13 @@ static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE],
54 + }
55 +
56 + sg_init_table(sg, np + 1);
57 +- np--;
58 ++ if (rem)
59 ++ np--;
60 + for (k = 0; k < np; k++)
61 + sg_set_buf(&sg[k + 1], xbuf[k], PAGE_SIZE);
62 +
63 +- sg_set_buf(&sg[k + 1], xbuf[k], rem);
64 ++ if (rem)
65 ++ sg_set_buf(&sg[k + 1], xbuf[k], rem);
66 + }
67 +
68 + static void test_aead_speed(const char *algo, int enc, unsigned int secs,
69 +diff --git a/drivers/gpio/gpio-uniphier.c b/drivers/gpio/gpio-uniphier.c
70 +index 016d7427ebfa..761d8279abca 100644
71 +--- a/drivers/gpio/gpio-uniphier.c
72 ++++ b/drivers/gpio/gpio-uniphier.c
73 +@@ -505,4 +505,4 @@ module_platform_driver(uniphier_gpio_driver);
74 +
75 + MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@×××××××××.com>");
76 + MODULE_DESCRIPTION("UniPhier GPIO driver");
77 +-MODULE_LICENSE("GPL");
78 ++MODULE_LICENSE("GPL v2");
79 +diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
80 +index 46768c056193..0c28d0b995cc 100644
81 +--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
82 ++++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
83 +@@ -115,3 +115,6 @@ struct mtk_vcodec_ctx *mtk_vcodec_get_curr_ctx(struct mtk_vcodec_dev *dev)
84 + return ctx;
85 + }
86 + EXPORT_SYMBOL(mtk_vcodec_get_curr_ctx);
87 ++
88 ++MODULE_LICENSE("GPL v2");
89 ++MODULE_DESCRIPTION("Mediatek video codec driver");
90 +diff --git a/drivers/media/platform/soc_camera/soc_scale_crop.c b/drivers/media/platform/soc_camera/soc_scale_crop.c
91 +index 270ec613c27c..6164102e6f9f 100644
92 +--- a/drivers/media/platform/soc_camera/soc_scale_crop.c
93 ++++ b/drivers/media/platform/soc_camera/soc_scale_crop.c
94 +@@ -420,3 +420,7 @@ void soc_camera_calc_client_output(struct soc_camera_device *icd,
95 + mf->height = soc_camera_shift_scale(rect->height, shift, scale_v);
96 + }
97 + EXPORT_SYMBOL(soc_camera_calc_client_output);
98 ++
99 ++MODULE_DESCRIPTION("soc-camera scaling-cropping functions");
100 ++MODULE_AUTHOR("Guennadi Liakhovetski <kernel@×××××××××××.de>");
101 ++MODULE_LICENSE("GPL");
102 +diff --git a/drivers/media/platform/tegra-cec/tegra_cec.c b/drivers/media/platform/tegra-cec/tegra_cec.c
103 +index 807c94c70049..92f93a880015 100644
104 +--- a/drivers/media/platform/tegra-cec/tegra_cec.c
105 ++++ b/drivers/media/platform/tegra-cec/tegra_cec.c
106 +@@ -493,3 +493,8 @@ static struct platform_driver tegra_cec_driver = {
107 + };
108 +
109 + module_platform_driver(tegra_cec_driver);
110 ++
111 ++MODULE_DESCRIPTION("Tegra HDMI CEC driver");
112 ++MODULE_AUTHOR("NVIDIA CORPORATION");
113 ++MODULE_AUTHOR("Cisco Systems, Inc. and/or its affiliates");
114 ++MODULE_LICENSE("GPL v2");
115 +diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
116 +index f7080d0ab874..46b0372dd032 100644
117 +--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
118 ++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
119 +@@ -3891,7 +3891,7 @@ static void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter)
120 + struct list_head *head = &mbx->cmd_q;
121 + struct qlcnic_cmd_args *cmd = NULL;
122 +
123 +- spin_lock(&mbx->queue_lock);
124 ++ spin_lock_bh(&mbx->queue_lock);
125 +
126 + while (!list_empty(head)) {
127 + cmd = list_entry(head->next, struct qlcnic_cmd_args, list);
128 +@@ -3902,7 +3902,7 @@ static void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter)
129 + qlcnic_83xx_notify_cmd_completion(adapter, cmd);
130 + }
131 +
132 +- spin_unlock(&mbx->queue_lock);
133 ++ spin_unlock_bh(&mbx->queue_lock);
134 + }
135 +
136 + static int qlcnic_83xx_check_mbx_status(struct qlcnic_adapter *adapter)
137 +@@ -3938,12 +3938,12 @@ static void qlcnic_83xx_dequeue_mbx_cmd(struct qlcnic_adapter *adapter,
138 + {
139 + struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
140 +
141 +- spin_lock(&mbx->queue_lock);
142 ++ spin_lock_bh(&mbx->queue_lock);
143 +
144 + list_del(&cmd->list);
145 + mbx->num_cmds--;
146 +
147 +- spin_unlock(&mbx->queue_lock);
148 ++ spin_unlock_bh(&mbx->queue_lock);
149 +
150 + qlcnic_83xx_notify_cmd_completion(adapter, cmd);
151 + }
152 +@@ -4008,7 +4008,7 @@ static int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter,
153 + init_completion(&cmd->completion);
154 + cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_UNKNOWN;
155 +
156 +- spin_lock(&mbx->queue_lock);
157 ++ spin_lock_bh(&mbx->queue_lock);
158 +
159 + list_add_tail(&cmd->list, &mbx->cmd_q);
160 + mbx->num_cmds++;
161 +@@ -4016,7 +4016,7 @@ static int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter,
162 + *timeout = cmd->total_cmds * QLC_83XX_MBX_TIMEOUT;
163 + queue_work(mbx->work_q, &mbx->work);
164 +
165 +- spin_unlock(&mbx->queue_lock);
166 ++ spin_unlock_bh(&mbx->queue_lock);
167 +
168 + return 0;
169 + }
170 +@@ -4112,15 +4112,15 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
171 + mbx->rsp_status = QLC_83XX_MBX_RESPONSE_WAIT;
172 + spin_unlock_irqrestore(&mbx->aen_lock, flags);
173 +
174 +- spin_lock(&mbx->queue_lock);
175 ++ spin_lock_bh(&mbx->queue_lock);
176 +
177 + if (list_empty(head)) {
178 +- spin_unlock(&mbx->queue_lock);
179 ++ spin_unlock_bh(&mbx->queue_lock);
180 + return;
181 + }
182 + cmd = list_entry(head->next, struct qlcnic_cmd_args, list);
183 +
184 +- spin_unlock(&mbx->queue_lock);
185 ++ spin_unlock_bh(&mbx->queue_lock);
186 +
187 + mbx_ops->encode_cmd(adapter, cmd);
188 + mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_REQUEST);
189 +diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
190 +index 734286ebe5ef..dd713dff8d22 100644
191 +--- a/drivers/net/ethernet/realtek/r8169.c
192 ++++ b/drivers/net/ethernet/realtek/r8169.c
193 +@@ -1395,7 +1395,7 @@ DECLARE_RTL_COND(rtl_ocp_tx_cond)
194 + {
195 + void __iomem *ioaddr = tp->mmio_addr;
196 +
197 +- return RTL_R8(IBISR0) & 0x02;
198 ++ return RTL_R8(IBISR0) & 0x20;
199 + }
200 +
201 + static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
202 +@@ -1403,7 +1403,7 @@ static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
203 + void __iomem *ioaddr = tp->mmio_addr;
204 +
205 + RTL_W8(IBCR2, RTL_R8(IBCR2) & ~0x01);
206 +- rtl_msleep_loop_wait_low(tp, &rtl_ocp_tx_cond, 50, 2000);
207 ++ rtl_msleep_loop_wait_high(tp, &rtl_ocp_tx_cond, 50, 2000);
208 + RTL_W8(IBISR0, RTL_R8(IBISR0) | 0x20);
209 + RTL_W8(IBCR0, RTL_R8(IBCR0) & ~0x01);
210 + }
211 +diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
212 +index fc8f8bdf6579..056cb6093630 100644
213 +--- a/drivers/net/ethernet/rocker/rocker_main.c
214 ++++ b/drivers/net/ethernet/rocker/rocker_main.c
215 +@@ -2902,6 +2902,12 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
216 + goto err_alloc_ordered_workqueue;
217 + }
218 +
219 ++ err = rocker_probe_ports(rocker);
220 ++ if (err) {
221 ++ dev_err(&pdev->dev, "failed to probe ports\n");
222 ++ goto err_probe_ports;
223 ++ }
224 ++
225 + /* Only FIBs pointing to our own netdevs are programmed into
226 + * the device, so no need to pass a callback.
227 + */
228 +@@ -2918,22 +2924,16 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
229 +
230 + rocker->hw.id = rocker_read64(rocker, SWITCH_ID);
231 +
232 +- err = rocker_probe_ports(rocker);
233 +- if (err) {
234 +- dev_err(&pdev->dev, "failed to probe ports\n");
235 +- goto err_probe_ports;
236 +- }
237 +-
238 + dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
239 + (int)sizeof(rocker->hw.id), &rocker->hw.id);
240 +
241 + return 0;
242 +
243 +-err_probe_ports:
244 +- unregister_switchdev_notifier(&rocker_switchdev_notifier);
245 + err_register_switchdev_notifier:
246 + unregister_fib_notifier(&rocker->fib_nb);
247 + err_register_fib_notifier:
248 ++ rocker_remove_ports(rocker);
249 ++err_probe_ports:
250 + destroy_workqueue(rocker->rocker_owq);
251 + err_alloc_ordered_workqueue:
252 + free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
253 +@@ -2961,9 +2961,9 @@ static void rocker_remove(struct pci_dev *pdev)
254 + {
255 + struct rocker *rocker = pci_get_drvdata(pdev);
256 +
257 +- rocker_remove_ports(rocker);
258 + unregister_switchdev_notifier(&rocker_switchdev_notifier);
259 + unregister_fib_notifier(&rocker->fib_nb);
260 ++ rocker_remove_ports(rocker);
261 + rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
262 + destroy_workqueue(rocker->rocker_owq);
263 + free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
264 +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
265 +index 728819feab44..e7114c34fe4b 100644
266 +--- a/drivers/net/usb/qmi_wwan.c
267 ++++ b/drivers/net/usb/qmi_wwan.c
268 +@@ -1245,6 +1245,7 @@ static const struct usb_device_id products[] = {
269 + {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
270 + {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
271 + {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */
272 ++ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0306, 4)}, /* Quectel EP06 Mini PCIe */
273 +
274 + /* 4. Gobi 1000 devices */
275 + {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
276 +diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
277 +index c7bdeb655646..5636c7ca8eba 100644
278 +--- a/drivers/vhost/net.c
279 ++++ b/drivers/vhost/net.c
280 +@@ -1208,6 +1208,7 @@ static long vhost_net_reset_owner(struct vhost_net *n)
281 + }
282 + vhost_net_stop(n, &tx_sock, &rx_sock);
283 + vhost_net_flush(n);
284 ++ vhost_dev_stop(&n->dev);
285 + vhost_dev_reset_owner(&n->dev, umem);
286 + vhost_net_vq_reset(n);
287 + done:
288 +diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
289 +index becf86aa4ac6..d6ec5a5a6782 100644
290 +--- a/include/net/sch_generic.h
291 ++++ b/include/net/sch_generic.h
292 +@@ -280,7 +280,6 @@ struct tcf_block {
293 + struct net *net;
294 + struct Qdisc *q;
295 + struct list_head cb_list;
296 +- struct work_struct work;
297 + };
298 +
299 + static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
300 +diff --git a/mm/memcontrol.c b/mm/memcontrol.c
301 +index ac2ffd5e02b9..0a78ce57872d 100644
302 +--- a/mm/memcontrol.c
303 ++++ b/mm/memcontrol.c
304 +@@ -5828,6 +5828,20 @@ void mem_cgroup_sk_alloc(struct sock *sk)
305 + if (!mem_cgroup_sockets_enabled)
306 + return;
307 +
308 ++ /*
309 ++ * Socket cloning can throw us here with sk_memcg already
310 ++ * filled. It won't however, necessarily happen from
311 ++ * process context. So the test for root memcg given
312 ++ * the current task's memcg won't help us in this case.
313 ++ *
314 ++ * Respecting the original socket's memcg is a better
315 ++ * decision in this case.
316 ++ */
317 ++ if (sk->sk_memcg) {
318 ++ css_get(&sk->sk_memcg->css);
319 ++ return;
320 ++ }
321 ++
322 + rcu_read_lock();
323 + memcg = mem_cgroup_from_task(current);
324 + if (memcg == root_mem_cgroup)
325 +diff --git a/net/core/sock.c b/net/core/sock.c
326 +index c0b5b2f17412..7571dabfc4cf 100644
327 +--- a/net/core/sock.c
328 ++++ b/net/core/sock.c
329 +@@ -1675,16 +1675,13 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
330 + newsk->sk_dst_pending_confirm = 0;
331 + newsk->sk_wmem_queued = 0;
332 + newsk->sk_forward_alloc = 0;
333 +-
334 +- /* sk->sk_memcg will be populated at accept() time */
335 +- newsk->sk_memcg = NULL;
336 +-
337 + atomic_set(&newsk->sk_drops, 0);
338 + newsk->sk_send_head = NULL;
339 + newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
340 + atomic_set(&newsk->sk_zckey, 0);
341 +
342 + sock_reset_flag(newsk, SOCK_DONE);
343 ++ mem_cgroup_sk_alloc(newsk);
344 + cgroup_sk_alloc(&newsk->sk_cgrp_data);
345 +
346 + rcu_read_lock();
347 +diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
348 +index 5eeb1d20cc38..676092d7bd81 100644
349 +--- a/net/core/sock_reuseport.c
350 ++++ b/net/core/sock_reuseport.c
351 +@@ -94,6 +94,16 @@ static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
352 + return more_reuse;
353 + }
354 +
355 ++static void reuseport_free_rcu(struct rcu_head *head)
356 ++{
357 ++ struct sock_reuseport *reuse;
358 ++
359 ++ reuse = container_of(head, struct sock_reuseport, rcu);
360 ++ if (reuse->prog)
361 ++ bpf_prog_destroy(reuse->prog);
362 ++ kfree(reuse);
363 ++}
364 ++
365 + /**
366 + * reuseport_add_sock - Add a socket to the reuseport group of another.
367 + * @sk: New socket to add to the group.
368 +@@ -102,7 +112,7 @@ static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
369 + */
370 + int reuseport_add_sock(struct sock *sk, struct sock *sk2)
371 + {
372 +- struct sock_reuseport *reuse;
373 ++ struct sock_reuseport *old_reuse, *reuse;
374 +
375 + if (!rcu_access_pointer(sk2->sk_reuseport_cb)) {
376 + int err = reuseport_alloc(sk2);
377 +@@ -113,10 +123,13 @@ int reuseport_add_sock(struct sock *sk, struct sock *sk2)
378 +
379 + spin_lock_bh(&reuseport_lock);
380 + reuse = rcu_dereference_protected(sk2->sk_reuseport_cb,
381 +- lockdep_is_held(&reuseport_lock)),
382 +- WARN_ONCE(rcu_dereference_protected(sk->sk_reuseport_cb,
383 +- lockdep_is_held(&reuseport_lock)),
384 +- "socket already in reuseport group");
385 ++ lockdep_is_held(&reuseport_lock));
386 ++ old_reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
387 ++ lockdep_is_held(&reuseport_lock));
388 ++ if (old_reuse && old_reuse->num_socks != 1) {
389 ++ spin_unlock_bh(&reuseport_lock);
390 ++ return -EBUSY;
391 ++ }
392 +
393 + if (reuse->num_socks == reuse->max_socks) {
394 + reuse = reuseport_grow(reuse);
395 +@@ -134,19 +147,11 @@ int reuseport_add_sock(struct sock *sk, struct sock *sk2)
396 +
397 + spin_unlock_bh(&reuseport_lock);
398 +
399 ++ if (old_reuse)
400 ++ call_rcu(&old_reuse->rcu, reuseport_free_rcu);
401 + return 0;
402 + }
403 +
404 +-static void reuseport_free_rcu(struct rcu_head *head)
405 +-{
406 +- struct sock_reuseport *reuse;
407 +-
408 +- reuse = container_of(head, struct sock_reuseport, rcu);
409 +- if (reuse->prog)
410 +- bpf_prog_destroy(reuse->prog);
411 +- kfree(reuse);
412 +-}
413 +-
414 + void reuseport_detach_sock(struct sock *sk)
415 + {
416 + struct sock_reuseport *reuse;
417 +diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
418 +index 2d49717a7421..f0b1fc35dde1 100644
419 +--- a/net/ipv4/igmp.c
420 ++++ b/net/ipv4/igmp.c
421 +@@ -386,7 +386,11 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
422 + pip->frag_off = htons(IP_DF);
423 + pip->ttl = 1;
424 + pip->daddr = fl4.daddr;
425 ++
426 ++ rcu_read_lock();
427 + pip->saddr = igmpv3_get_srcaddr(dev, &fl4);
428 ++ rcu_read_unlock();
429 ++
430 + pip->protocol = IPPROTO_IGMP;
431 + pip->tot_len = 0; /* filled in later */
432 + ip_select_ident(net, skb, NULL);
433 +diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
434 +index 4ca46dc08e63..3668c4182655 100644
435 +--- a/net/ipv4/inet_connection_sock.c
436 ++++ b/net/ipv4/inet_connection_sock.c
437 +@@ -475,7 +475,6 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
438 + }
439 + spin_unlock_bh(&queue->fastopenq.lock);
440 + }
441 +- mem_cgroup_sk_alloc(newsk);
442 + out:
443 + release_sock(sk);
444 + if (req)
445 +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
446 +index 8e053ad7cae2..c821f5d68720 100644
447 +--- a/net/ipv4/tcp.c
448 ++++ b/net/ipv4/tcp.c
449 +@@ -2434,6 +2434,12 @@ int tcp_disconnect(struct sock *sk, int flags)
450 +
451 + WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
452 +
453 ++ if (sk->sk_frag.page) {
454 ++ put_page(sk->sk_frag.page);
455 ++ sk->sk_frag.page = NULL;
456 ++ sk->sk_frag.offset = 0;
457 ++ }
458 ++
459 + sk->sk_error_report(sk);
460 + return err;
461 + }
462 +diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
463 +index 8322f26e770e..25c5a0b60cfc 100644
464 +--- a/net/ipv4/tcp_bbr.c
465 ++++ b/net/ipv4/tcp_bbr.c
466 +@@ -481,7 +481,8 @@ static void bbr_advance_cycle_phase(struct sock *sk)
467 +
468 + bbr->cycle_idx = (bbr->cycle_idx + 1) & (CYCLE_LEN - 1);
469 + bbr->cycle_mstamp = tp->delivered_mstamp;
470 +- bbr->pacing_gain = bbr_pacing_gain[bbr->cycle_idx];
471 ++ bbr->pacing_gain = bbr->lt_use_bw ? BBR_UNIT :
472 ++ bbr_pacing_gain[bbr->cycle_idx];
473 + }
474 +
475 + /* Gain cycling: cycle pacing gain to converge to fair share of available bw. */
476 +@@ -490,8 +491,7 @@ static void bbr_update_cycle_phase(struct sock *sk,
477 + {
478 + struct bbr *bbr = inet_csk_ca(sk);
479 +
480 +- if ((bbr->mode == BBR_PROBE_BW) && !bbr->lt_use_bw &&
481 +- bbr_is_next_cycle_phase(sk, rs))
482 ++ if (bbr->mode == BBR_PROBE_BW && bbr_is_next_cycle_phase(sk, rs))
483 + bbr_advance_cycle_phase(sk);
484 + }
485 +
486 +diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
487 +index f49bd7897e95..2547222589fe 100644
488 +--- a/net/ipv6/addrconf.c
489 ++++ b/net/ipv6/addrconf.c
490 +@@ -186,7 +186,8 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
491 +
492 + static void addrconf_dad_start(struct inet6_ifaddr *ifp);
493 + static void addrconf_dad_work(struct work_struct *w);
494 +-static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id);
495 ++static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
496 ++ bool send_na);
497 + static void addrconf_dad_run(struct inet6_dev *idev);
498 + static void addrconf_rs_timer(struct timer_list *t);
499 + static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
500 +@@ -3833,12 +3834,17 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
501 + idev->cnf.accept_dad < 1) ||
502 + !(ifp->flags&IFA_F_TENTATIVE) ||
503 + ifp->flags & IFA_F_NODAD) {
504 ++ bool send_na = false;
505 ++
506 ++ if (ifp->flags & IFA_F_TENTATIVE &&
507 ++ !(ifp->flags & IFA_F_OPTIMISTIC))
508 ++ send_na = true;
509 + bump_id = ifp->flags & IFA_F_TENTATIVE;
510 + ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
511 + spin_unlock(&ifp->lock);
512 + read_unlock_bh(&idev->lock);
513 +
514 +- addrconf_dad_completed(ifp, bump_id);
515 ++ addrconf_dad_completed(ifp, bump_id, send_na);
516 + return;
517 + }
518 +
519 +@@ -3967,16 +3973,21 @@ static void addrconf_dad_work(struct work_struct *w)
520 + }
521 +
522 + if (ifp->dad_probes == 0) {
523 ++ bool send_na = false;
524 ++
525 + /*
526 + * DAD was successful
527 + */
528 +
529 ++ if (ifp->flags & IFA_F_TENTATIVE &&
530 ++ !(ifp->flags & IFA_F_OPTIMISTIC))
531 ++ send_na = true;
532 + bump_id = ifp->flags & IFA_F_TENTATIVE;
533 + ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
534 + spin_unlock(&ifp->lock);
535 + write_unlock_bh(&idev->lock);
536 +
537 +- addrconf_dad_completed(ifp, bump_id);
538 ++ addrconf_dad_completed(ifp, bump_id, send_na);
539 +
540 + goto out;
541 + }
542 +@@ -4014,7 +4025,8 @@ static bool ipv6_lonely_lladdr(struct inet6_ifaddr *ifp)
543 + return true;
544 + }
545 +
546 +-static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id)
547 ++static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
548 ++ bool send_na)
549 + {
550 + struct net_device *dev = ifp->idev->dev;
551 + struct in6_addr lladdr;
552 +@@ -4046,6 +4058,16 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id)
553 + if (send_mld)
554 + ipv6_mc_dad_complete(ifp->idev);
555 +
556 ++ /* send unsolicited NA if enabled */
557 ++ if (send_na &&
558 ++ (ifp->idev->cnf.ndisc_notify ||
559 ++ dev_net(dev)->ipv6.devconf_all->ndisc_notify)) {
560 ++ ndisc_send_na(dev, &in6addr_linklocal_allnodes, &ifp->addr,
561 ++ /*router=*/ !!ifp->idev->cnf.forwarding,
562 ++ /*solicited=*/ false, /*override=*/ true,
563 ++ /*inc_opt=*/ true);
564 ++ }
565 ++
566 + if (send_rs) {
567 + /*
568 + * If a host as already performed a random delay
569 +@@ -4352,9 +4374,11 @@ static void addrconf_verify_rtnl(void)
570 + spin_lock(&ifpub->lock);
571 + ifpub->regen_count = 0;
572 + spin_unlock(&ifpub->lock);
573 ++ rcu_read_unlock_bh();
574 + ipv6_create_tempaddr(ifpub, ifp, true);
575 + in6_ifa_put(ifpub);
576 + in6_ifa_put(ifp);
577 ++ rcu_read_lock_bh();
578 + goto restart;
579 + }
580 + } else if (time_before(ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ, next))
581 +diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
582 +index c9441ca45399..416917719a6f 100644
583 +--- a/net/ipv6/af_inet6.c
584 ++++ b/net/ipv6/af_inet6.c
585 +@@ -284,6 +284,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
586 + struct net *net = sock_net(sk);
587 + __be32 v4addr = 0;
588 + unsigned short snum;
589 ++ bool saved_ipv6only;
590 + int addr_type = 0;
591 + int err = 0;
592 +
593 +@@ -389,19 +390,21 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
594 + if (!(addr_type & IPV6_ADDR_MULTICAST))
595 + np->saddr = addr->sin6_addr;
596 +
597 ++ saved_ipv6only = sk->sk_ipv6only;
598 ++ if (addr_type != IPV6_ADDR_ANY && addr_type != IPV6_ADDR_MAPPED)
599 ++ sk->sk_ipv6only = 1;
600 ++
601 + /* Make sure we are allowed to bind here. */
602 + if ((snum || !inet->bind_address_no_port) &&
603 + sk->sk_prot->get_port(sk, snum)) {
604 ++ sk->sk_ipv6only = saved_ipv6only;
605 + inet_reset_saddr(sk);
606 + err = -EADDRINUSE;
607 + goto out;
608 + }
609 +
610 +- if (addr_type != IPV6_ADDR_ANY) {
611 ++ if (addr_type != IPV6_ADDR_ANY)
612 + sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
613 +- if (addr_type != IPV6_ADDR_MAPPED)
614 +- sk->sk_ipv6only = 1;
615 +- }
616 + if (snum)
617 + sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
618 + inet->inet_sport = htons(inet->inet_num);
619 +diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
620 +index a2e1a864eb46..4fc566ec7e79 100644
621 +--- a/net/ipv6/ip6mr.c
622 ++++ b/net/ipv6/ip6mr.c
623 +@@ -495,6 +495,7 @@ static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
624 + return ERR_PTR(-ENOENT);
625 +
626 + it->mrt = mrt;
627 ++ it->cache = NULL;
628 + return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
629 + : SEQ_START_TOKEN;
630 + }
631 +diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
632 +index b3cea200c85e..f61a5b613b52 100644
633 +--- a/net/ipv6/ndisc.c
634 ++++ b/net/ipv6/ndisc.c
635 +@@ -566,6 +566,11 @@ static void ndisc_send_unsol_na(struct net_device *dev)
636 +
637 + read_lock_bh(&idev->lock);
638 + list_for_each_entry(ifa, &idev->addr_list, if_list) {
639 ++ /* skip tentative addresses until dad completes */
640 ++ if (ifa->flags & IFA_F_TENTATIVE &&
641 ++ !(ifa->flags & IFA_F_OPTIMISTIC))
642 ++ continue;
643 ++
644 + ndisc_send_na(dev, &in6addr_linklocal_allnodes, &ifa->addr,
645 + /*router=*/ !!idev->cnf.forwarding,
646 + /*solicited=*/ false, /*override=*/ true,
647 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
648 +index 0458b761f3c5..a560fb1d0230 100644
649 +--- a/net/ipv6/route.c
650 ++++ b/net/ipv6/route.c
651 +@@ -1586,12 +1586,19 @@ static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
652 + * EXPIRES exceptions - e.g. pmtu-generated ones are pruned when
653 + * expired, independently from their aging, as per RFC 8201 section 4
654 + */
655 +- if (!(rt->rt6i_flags & RTF_EXPIRES) &&
656 +- time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
657 +- RT6_TRACE("aging clone %p\n", rt);
658 ++ if (!(rt->rt6i_flags & RTF_EXPIRES)) {
659 ++ if (time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
660 ++ RT6_TRACE("aging clone %p\n", rt);
661 ++ rt6_remove_exception(bucket, rt6_ex);
662 ++ return;
663 ++ }
664 ++ } else if (time_after(jiffies, rt->dst.expires)) {
665 ++ RT6_TRACE("purging expired route %p\n", rt);
666 + rt6_remove_exception(bucket, rt6_ex);
667 + return;
668 +- } else if (rt->rt6i_flags & RTF_GATEWAY) {
669 ++ }
670 ++
671 ++ if (rt->rt6i_flags & RTF_GATEWAY) {
672 + struct neighbour *neigh;
673 + __u8 neigh_flags = 0;
674 +
675 +@@ -1606,11 +1613,8 @@ static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
676 + rt6_remove_exception(bucket, rt6_ex);
677 + return;
678 + }
679 +- } else if (__rt6_check_expired(rt)) {
680 +- RT6_TRACE("purging expired route %p\n", rt);
681 +- rt6_remove_exception(bucket, rt6_ex);
682 +- return;
683 + }
684 ++
685 + gc_args->more++;
686 + }
687 +
688 +diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
689 +index b9d63d2246e6..e6b853f0ee4f 100644
690 +--- a/net/sched/cls_api.c
691 ++++ b/net/sched/cls_api.c
692 +@@ -217,8 +217,12 @@ static void tcf_chain_flush(struct tcf_chain *chain)
693 +
694 + static void tcf_chain_destroy(struct tcf_chain *chain)
695 + {
696 ++ struct tcf_block *block = chain->block;
697 ++
698 + list_del(&chain->list);
699 + kfree(chain);
700 ++ if (list_empty(&block->chain_list))
701 ++ kfree(block);
702 + }
703 +
704 + static void tcf_chain_hold(struct tcf_chain *chain)
705 +@@ -329,49 +333,34 @@ int tcf_block_get(struct tcf_block **p_block,
706 + }
707 + EXPORT_SYMBOL(tcf_block_get);
708 +
709 +-static void tcf_block_put_final(struct work_struct *work)
710 +-{
711 +- struct tcf_block *block = container_of(work, struct tcf_block, work);
712 +- struct tcf_chain *chain, *tmp;
713 +-
714 +- rtnl_lock();
715 +-
716 +- /* At this point, all the chains should have refcnt == 1. */
717 +- list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
718 +- tcf_chain_put(chain);
719 +- rtnl_unlock();
720 +- kfree(block);
721 +-}
722 +-
723 + /* XXX: Standalone actions are not allowed to jump to any chain, and bound
724 + * actions should be all removed after flushing.
725 + */
726 + void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
727 + struct tcf_block_ext_info *ei)
728 + {
729 +- struct tcf_chain *chain;
730 ++ struct tcf_chain *chain, *tmp;
731 +
732 + if (!block)
733 + return;
734 +- /* Hold a refcnt for all chains, except 0, so that they don't disappear
735 ++ /* Hold a refcnt for all chains, so that they don't disappear
736 + * while we are iterating.
737 + */
738 + list_for_each_entry(chain, &block->chain_list, list)
739 +- if (chain->index)
740 +- tcf_chain_hold(chain);
741 ++ tcf_chain_hold(chain);
742 +
743 + list_for_each_entry(chain, &block->chain_list, list)
744 + tcf_chain_flush(chain);
745 +
746 + tcf_block_offload_unbind(block, q, ei);
747 +
748 +- INIT_WORK(&block->work, tcf_block_put_final);
749 +- /* Wait for existing RCU callbacks to cool down, make sure their works
750 +- * have been queued before this. We can not flush pending works here
751 +- * because we are holding the RTNL lock.
752 +- */
753 +- rcu_barrier();
754 +- tcf_queue_work(&block->work);
755 ++ /* At this point, all the chains should have refcnt >= 1. */
756 ++ list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
757 ++ tcf_chain_put(chain);
758 ++
759 ++ /* Finally, put chain 0 and allow block to be freed. */
760 ++ chain = list_first_entry(&block->chain_list, struct tcf_chain, list);
761 ++ tcf_chain_put(chain);
762 + }
763 + EXPORT_SYMBOL(tcf_block_put_ext);
764 +
765 +diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
766 +index 507859cdd1cb..33294b5b2c6a 100644
767 +--- a/net/sched/cls_u32.c
768 ++++ b/net/sched/cls_u32.c
769 +@@ -544,6 +544,7 @@ static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle)
770 + static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
771 + u32 flags)
772 + {
773 ++ struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
774 + struct tcf_block *block = tp->chain->block;
775 + struct tc_cls_u32_offload cls_u32 = {};
776 + bool skip_sw = tc_skip_sw(flags);
777 +@@ -563,7 +564,7 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
778 + cls_u32.knode.sel = &n->sel;
779 + cls_u32.knode.exts = &n->exts;
780 + if (n->ht_down)
781 +- cls_u32.knode.link_handle = n->ht_down->handle;
782 ++ cls_u32.knode.link_handle = ht->handle;
783 +
784 + err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, skip_sw);
785 + if (err < 0) {
786 +@@ -840,8 +841,9 @@ static void u32_replace_knode(struct tcf_proto *tp, struct tc_u_common *tp_c,
787 + static struct tc_u_knode *u32_init_knode(struct tcf_proto *tp,
788 + struct tc_u_knode *n)
789 + {
790 +- struct tc_u_knode *new;
791 ++ struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
792 + struct tc_u32_sel *s = &n->sel;
793 ++ struct tc_u_knode *new;
794 +
795 + new = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key),
796 + GFP_KERNEL);
797 +@@ -859,11 +861,11 @@ static struct tc_u_knode *u32_init_knode(struct tcf_proto *tp,
798 + new->fshift = n->fshift;
799 + new->res = n->res;
800 + new->flags = n->flags;
801 +- RCU_INIT_POINTER(new->ht_down, n->ht_down);
802 ++ RCU_INIT_POINTER(new->ht_down, ht);
803 +
804 + /* bump reference count as long as we hold pointer to structure */
805 +- if (new->ht_down)
806 +- new->ht_down->refcnt++;
807 ++ if (ht)
808 ++ ht->refcnt++;
809 +
810 + #ifdef CONFIG_CLS_U32_PERF
811 + /* Statistics may be incremented by readers during update