From: Alice Ferrazzi <alicef@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:5.4 commit in: /
Date: Wed, 03 Aug 2022 14:51:14
Message-Id: 1659537468.7da5eb1abe210716a88e1a592dbb4b83449ca838.alicef@gentoo
commit: 7da5eb1abe210716a88e1a592dbb4b83449ca838
Author: Alice Ferrazzi <alicef@gentoo.org>
AuthorDate: Wed Aug 3 14:37:41 2022 +0000
Commit: Alice Ferrazzi <alicef@gentoo.org>
CommitDate: Wed Aug 3 14:37:48 2022 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=7da5eb1a

Linux patch 5.4.209

Signed-off-by: Alice Ferrazzi <alicef@gentoo.org>

0000_README | 4 +
1208_linux-5.4.209.patch | 1020 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 1024 insertions(+)

diff --git a/0000_README b/0000_README
index d4fe1a15..b02651d3 100644
--- a/0000_README
+++ b/0000_README
@@ -875,6 +875,10 @@ Patch: 1207_linux-5.4.208.patch
From: http://www.kernel.org
Desc: Linux 5.4.208

+Patch: 1208_linux-5.4.209.patch
+From: http://www.kernel.org
+Desc: Linux 5.4.209
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1208_linux-5.4.209.patch b/1208_linux-5.4.209.patch
new file mode 100644
index 00000000..bb150ac3
--- /dev/null
+++ b/1208_linux-5.4.209.patch
@@ -0,0 +1,1020 @@
+diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
+index 787a9c077ef1d..5cf601c94e354 100644
+--- a/Documentation/networking/ip-sysctl.txt
++++ b/Documentation/networking/ip-sysctl.txt
+@@ -2284,7 +2284,14 @@ sctp_rmem - vector of 3 INTEGERs: min, default, max
+ Default: 4K
+
+ sctp_wmem - vector of 3 INTEGERs: min, default, max
+- Currently this tunable has no effect.
++ Only the first value ("min") is used, "default" and "max" are
++ ignored.
++
++ min: Minimum size of send buffer that can be used by SCTP sockets.
++ It is guaranteed to each SCTP socket (but not association) even
++ under moderate memory pressure.
++
++ Default: 4K
+
+ addr_scope_policy - INTEGER
+ Control IPv4 address scoping - draft-stewart-tsvwg-sctp-ipv4-00
+diff --git a/Makefile b/Makefile
+index 884a3f314baf8..7093e3b03b9f7 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 4
+-SUBLEVEL = 208
++SUBLEVEL = 209
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+diff --git a/arch/arm/lib/xor-neon.c b/arch/arm/lib/xor-neon.c
+index b99dd8e1c93f1..7ba6cf8261626 100644
+--- a/arch/arm/lib/xor-neon.c
++++ b/arch/arm/lib/xor-neon.c
+@@ -26,8 +26,9 @@ MODULE_LICENSE("GPL");
+ * While older versions of GCC do not generate incorrect code, they fail to
+ * recognize the parallel nature of these functions, and emit plain ARM code,
+ * which is known to be slower than the optimized ARM code in asm-arm/xor.h.
++ *
++ * #warning This code requires at least version 4.6 of GCC
+ */
+-#warning This code requires at least version 4.6 of GCC
+ #endif
+
+ #pragma GCC diagnostic ignored "-Wunused-variable"
+diff --git a/arch/s390/include/asm/archrandom.h b/arch/s390/include/asm/archrandom.h
+index 2c6e1c6ecbe78..4120c428dc378 100644
+--- a/arch/s390/include/asm/archrandom.h
++++ b/arch/s390/include/asm/archrandom.h
+@@ -2,7 +2,7 @@
+ /*
+ * Kernel interface for the s390 arch_random_* functions
+ *
+- * Copyright IBM Corp. 2017, 2020
++ * Copyright IBM Corp. 2017, 2022
+ *
+ * Author: Harald Freudenberger <freude@××××××.com>
+ *
+@@ -14,6 +14,7 @@
+ #ifdef CONFIG_ARCH_RANDOM
+
+ #include <linux/static_key.h>
++#include <linux/preempt.h>
+ #include <linux/atomic.h>
+ #include <asm/cpacf.h>
+
+@@ -32,7 +33,8 @@ static inline bool __must_check arch_get_random_int(unsigned int *v)
+
+ static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
+ {
+- if (static_branch_likely(&s390_arch_random_available)) {
++ if (static_branch_likely(&s390_arch_random_available) &&
++ in_task()) {
+ cpacf_trng(NULL, 0, (u8 *)v, sizeof(*v));
+ atomic64_add(sizeof(*v), &s390_arch_random_counter);
+ return true;
+@@ -42,7 +44,8 @@ static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
+
+ static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
+ {
+- if (static_branch_likely(&s390_arch_random_available)) {
++ if (static_branch_likely(&s390_arch_random_available) &&
++ in_task()) {
+ cpacf_trng(NULL, 0, (u8 *)v, sizeof(*v));
+ atomic64_add(sizeof(*v), &s390_arch_random_counter);
+ return true;
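
Note on the two archrandom hunks above: cpacf_trng is a comparatively slow operation, and the random core may call the arch_get_random_seed_* hooks from interrupt context, so the backport only uses the hardware source from plain task context and lets other callers fall back to the software pool. A minimal sketch of that guard, with hypothetical names (not part of the patch):

	#include <linux/preempt.h>	/* in_task() */

	/* Hypothetical seed callback: refuse to run the slow hardware
	 * TRNG op outside plain task context (hardirq/softirq/NMI);
	 * returning false lets the caller fall back to its own pool.
	 */
	static bool example_get_seed(unsigned long *v)
	{
		if (!in_task())
			return false;
		/* ... fill *v from the hardware source here ... */
		return true;
	}
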
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 0610d344fdbf0..637f6ed78b489 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -1821,11 +1821,15 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
+ * non-zero req_queue_pairs says that user requested a new
+ * queue count via ethtool's set_channels, so use this
+ * value for queues distribution across traffic classes
++ * We need at least one queue pair for the interface
++ * to be usable as we see in else statement.
+ */
+ if (vsi->req_queue_pairs > 0)
+ vsi->num_queue_pairs = vsi->req_queue_pairs;
+ else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ vsi->num_queue_pairs = pf->num_lan_msix;
++ else
++ vsi->num_queue_pairs = 1;
+ }
+
+ /* Number of queues per enabled TC */
+diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+index b297a3ca22fc8..83678120573ec 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
++++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+@@ -619,7 +619,8 @@ static int ice_lbtest_receive_frames(struct ice_ring *rx_ring)
+ rx_desc = ICE_RX_DESC(rx_ring, i);
+
+ if (!(rx_desc->wb.status_error0 &
+- cpu_to_le16(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)))
++ (cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S)) |
++ cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)))))
+ continue;
+
+ rx_buf = &rx_ring->rx_buf[i];
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 88750a96cb3f2..7d28563ab7946 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -3495,10 +3495,12 @@ int ice_vsi_cfg(struct ice_vsi *vsi)
+ if (vsi->netdev) {
+ ice_set_rx_mode(vsi->netdev);
+
+- err = ice_vsi_vlan_setup(vsi);
++ if (vsi->type != ICE_VSI_LB) {
++ err = ice_vsi_vlan_setup(vsi);
+
+- if (err)
+- return err;
++ if (err)
++ return err;
++ }
+ }
+ ice_vsi_cfg_dcb_rings(vsi);
+
+diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
+index 1fa1b71dbfa11..ed1140ecca603 100644
+--- a/drivers/net/ethernet/sfc/ptp.c
++++ b/drivers/net/ethernet/sfc/ptp.c
+@@ -1093,7 +1093,29 @@ static void efx_ptp_xmit_skb_queue(struct efx_nic *efx, struct sk_buff *skb)
+
+ tx_queue = &ptp_data->channel->tx_queue[type];
+ if (tx_queue && tx_queue->timestamping) {
++ /* This code invokes normal driver TX code which is always
++ * protected from softirqs when called from generic TX code,
++ * which in turn disables preemption. Look at __dev_queue_xmit
++ * which uses rcu_read_lock_bh disabling preemption for RCU
++ * plus disabling softirqs. We do not need RCU reader
++ * protection here.
++ *
++ * Although it is theoretically safe for current PTP TX/RX code
++ * running without disabling softirqs, there are three good
++ * reasond for doing so:
++ *
++ * 1) The code invoked is mainly implemented for non-PTP
++ * packets and it is always executed with softirqs
++ * disabled.
++ * 2) This being a single PTP packet, better to not
++ * interrupt its processing by softirqs which can lead
++ * to high latencies.
++ * 3) netdev_xmit_more checks preemption is disabled and
++ * triggers a BUG_ON if not.
++ */
++ local_bh_disable();
+ efx_enqueue_skb(tx_queue, skb);
++ local_bh_enable();
+ } else {
+ WARN_ONCE(1, "PTP channel has no timestamped tx queue\n");
+ dev_kfree_skb_any(skb);
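
Note on the sfc hunk above: it recreates, for a side path, the environment the stack normally guarantees — __dev_queue_xmit runs driver TX code under rcu_read_lock_bh(), i.e. with softirqs and preemption disabled. A condensed sketch of the same idea, with hypothetical queue/enqueue names:

	#include <linux/bottom_half.h>
	#include <linux/skbuff.h>

	struct my_tx_queue;	/* assumed driver type, for illustration */
	void my_enqueue_skb(struct my_tx_queue *q, struct sk_buff *skb);

	/* Driver TX code entered from outside the normal stack path
	 * must see the same environment it was written for; among
	 * other things, netdev_xmit_more() asserts preemption is off.
	 */
	static void xmit_bypassing_stack(struct my_tx_queue *q, struct sk_buff *skb)
	{
		local_bh_disable();	/* softirqs off, hence preemption off */
		my_enqueue_skb(q, skb);
		local_bh_enable();
	}
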
+diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c
+index 291fa449993fb..45f295403cb55 100644
+--- a/drivers/net/sungem_phy.c
++++ b/drivers/net/sungem_phy.c
+@@ -454,6 +454,7 @@ static int bcm5421_init(struct mii_phy* phy)
+ int can_low_power = 1;
+ if (np == NULL || of_get_property(np, "no-autolowpower", NULL))
+ can_low_power = 0;
++ of_node_put(np);
+ if (can_low_power) {
+ /* Enable automatic low-power */
+ sungem_phy_write(phy, 0x1c, 0x9002);
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index e14842fbe3d62..579df7c5411d3 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -213,9 +213,15 @@ struct virtnet_info {
+ /* Packet virtio header size */
+ u8 hdr_len;
+
+- /* Work struct for refilling if we run low on memory. */
++ /* Work struct for delayed refilling if we run low on memory. */
+ struct delayed_work refill;
+
++ /* Is delayed refill enabled? */
++ bool refill_enabled;
++
++ /* The lock to synchronize the access to refill_enabled */
++ spinlock_t refill_lock;
++
+ /* Work struct for config space updates */
+ struct work_struct config_work;
+
+@@ -319,6 +325,20 @@ static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
+ return p;
+ }
+
++static void enable_delayed_refill(struct virtnet_info *vi)
++{
++ spin_lock_bh(&vi->refill_lock);
++ vi->refill_enabled = true;
++ spin_unlock_bh(&vi->refill_lock);
++}
++
++static void disable_delayed_refill(struct virtnet_info *vi)
++{
++ spin_lock_bh(&vi->refill_lock);
++ vi->refill_enabled = false;
++ spin_unlock_bh(&vi->refill_lock);
++}
++
+ static void virtqueue_napi_schedule(struct napi_struct *napi,
+ struct virtqueue *vq)
+ {
+@@ -1388,8 +1408,12 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
+ }
+
+ if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
+- if (!try_fill_recv(vi, rq, GFP_ATOMIC))
+- schedule_delayed_work(&vi->refill, 0);
++ if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
++ spin_lock(&vi->refill_lock);
++ if (vi->refill_enabled)
++ schedule_delayed_work(&vi->refill, 0);
++ spin_unlock(&vi->refill_lock);
++ }
+ }
+
+ u64_stats_update_begin(&rq->stats.syncp);
+@@ -1508,6 +1532,8 @@ static int virtnet_open(struct net_device *dev)
+ struct virtnet_info *vi = netdev_priv(dev);
+ int i, err;
+
++ enable_delayed_refill(vi);
++
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ if (i < vi->curr_queue_pairs)
+ /* Make sure we have some buffers: if oom use wq. */
+@@ -1878,6 +1904,8 @@ static int virtnet_close(struct net_device *dev)
+ struct virtnet_info *vi = netdev_priv(dev);
+ int i;
+
++ /* Make sure NAPI doesn't schedule refill work */
++ disable_delayed_refill(vi);
+ /* Make sure refill_work doesn't re-enable napi! */
+ cancel_delayed_work_sync(&vi->refill);
+
+@@ -2417,6 +2445,8 @@ static int virtnet_restore_up(struct virtio_device *vdev)
+
+ virtio_device_ready(vdev);
+
++ enable_delayed_refill(vi);
++
+ if (netif_running(vi->dev)) {
+ err = virtnet_open(vi->dev);
+ if (err)
+@@ -3140,6 +3170,7 @@ static int virtnet_probe(struct virtio_device *vdev)
+ vdev->priv = vi;
+
+ INIT_WORK(&vi->config_work, virtnet_config_changed_work);
++ spin_lock_init(&vi->refill_lock);
+
+ /* If we can receive ANY GSO packets, we must allocate large ones. */
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
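
Note on the virtio_net changes above: they close a race where NAPI could re-arm the refill work after virtnet_close() had already cancelled it. The shape of the fix is an enable flag guarded by a spinlock, cleared before the cancel; a condensed, self-contained sketch of the pattern (names hypothetical):

	#include <linux/spinlock.h>
	#include <linux/workqueue.h>

	struct refill_ctx {
		spinlock_t lock;
		bool enabled;
		struct delayed_work work;
	};

	/* Producer side (e.g. NAPI poll): only schedule while enabled. */
	static void maybe_schedule(struct refill_ctx *c)
	{
		spin_lock(&c->lock);
		if (c->enabled)
			schedule_delayed_work(&c->work, 0);
		spin_unlock(&c->lock);
	}

	/* Teardown: clear the flag first, then cancel; nothing can
	 * re-arm the work between the two steps.
	 */
	static void teardown(struct refill_ctx *c)
	{
		spin_lock_bh(&c->lock);
		c->enabled = false;
		spin_unlock_bh(&c->lock);
		cancel_delayed_work_sync(&c->work);
	}
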
+diff --git a/drivers/net/wireless/mediatek/mt7601u/usb.c b/drivers/net/wireless/mediatek/mt7601u/usb.c
+index 6bcc4a13ae6c7..cc772045d526f 100644
+--- a/drivers/net/wireless/mediatek/mt7601u/usb.c
++++ b/drivers/net/wireless/mediatek/mt7601u/usb.c
+@@ -26,6 +26,7 @@ static const struct usb_device_id mt7601u_device_table[] = {
+ { USB_DEVICE(0x2717, 0x4106) },
+ { USB_DEVICE(0x2955, 0x0001) },
+ { USB_DEVICE(0x2955, 0x1001) },
++ { USB_DEVICE(0x2955, 0x1003) },
+ { USB_DEVICE(0x2a5f, 0x1000) },
+ { USB_DEVICE(0x7392, 0x7710) },
+ { 0, }
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index 8e6d7ba95df14..98e363d0025b4 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -1719,8 +1719,7 @@ out_put_budget:
+ case BLK_STS_OK:
+ break;
+ case BLK_STS_RESOURCE:
+- if (atomic_read(&sdev->device_busy) ||
+- scsi_device_blocked(sdev))
++ if (scsi_device_blocked(sdev))
+ ret = BLK_STS_DEV_RESOURCE;
+ break;
+ default:
+diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
+index 10eec501f6b39..bfc589f4baf53 100644
+--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
++++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
+@@ -125,9 +125,20 @@ out:
+ return ret;
+ }
+
++static bool phandle_exists(const struct device_node *np,
++ const char *phandle_name, int index)
++{
++ struct device_node *parse_np = of_parse_phandle(np, phandle_name, index);
++
++ if (parse_np)
++ of_node_put(parse_np);
++
++ return parse_np != NULL;
++}
++
+ #define MAX_PROP_SIZE 32
+ static int ufshcd_populate_vreg(struct device *dev, const char *name,
+- struct ufs_vreg **out_vreg)
++ struct ufs_vreg **out_vreg)
+ {
+ int ret = 0;
+ char prop_name[MAX_PROP_SIZE];
+@@ -140,7 +151,7 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
+ }
+
+ snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", name);
+- if (!of_parse_phandle(np, prop_name, 0)) {
++ if (!phandle_exists(np, prop_name, 0)) {
+ dev_info(dev, "%s: Unable to find %s regulator, assuming enabled\n",
+ __func__, prop_name);
+ goto out;
+diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c
+index d563abc3e1364..914e991731300 100644
+--- a/fs/ntfs/attrib.c
++++ b/fs/ntfs/attrib.c
+@@ -592,8 +592,12 @@ static int ntfs_attr_find(const ATTR_TYPE type, const ntfschar *name,
+ a = (ATTR_RECORD*)((u8*)ctx->attr +
+ le32_to_cpu(ctx->attr->length));
+ for (;; a = (ATTR_RECORD*)((u8*)a + le32_to_cpu(a->length))) {
+- if ((u8*)a < (u8*)ctx->mrec || (u8*)a > (u8*)ctx->mrec +
+- le32_to_cpu(ctx->mrec->bytes_allocated))
++ u8 *mrec_end = (u8 *)ctx->mrec +
++ le32_to_cpu(ctx->mrec->bytes_allocated);
++ u8 *name_end = (u8 *)a + le16_to_cpu(a->name_offset) +
++ a->name_length * sizeof(ntfschar);
++ if ((u8*)a < (u8*)ctx->mrec || (u8*)a > mrec_end ||
++ name_end > mrec_end)
+ break;
+ ctx->attr = a;
+ if (unlikely(le32_to_cpu(a->type) > le32_to_cpu(type) ||
+diff --git a/include/net/addrconf.h b/include/net/addrconf.h
+index 8d90fb9184e8a..880e609b7352a 100644
+--- a/include/net/addrconf.h
++++ b/include/net/addrconf.h
+@@ -399,6 +399,9 @@ static inline bool ip6_ignore_linkdown(const struct net_device *dev)
+ {
+ const struct inet6_dev *idev = __in6_dev_get(dev);
+
++ if (unlikely(!idev))
++ return true;
++
+ return !!idev->cnf.ignore_routes_with_linkdown;
+ }
+
+diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
+index 8efc2419a815f..b2046b02d11d6 100644
+--- a/include/net/bluetooth/l2cap.h
++++ b/include/net/bluetooth/l2cap.h
+@@ -802,6 +802,7 @@ enum {
+ };
+
+ void l2cap_chan_hold(struct l2cap_chan *c);
++struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c);
+ void l2cap_chan_put(struct l2cap_chan *c);
+
+ static inline void l2cap_chan_lock(struct l2cap_chan *chan)
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index aaf1d5d5a13b0..8459145497b74 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -1389,7 +1389,7 @@ void tcp_select_initial_window(const struct sock *sk, int __space,
+
+ static inline int tcp_win_from_space(const struct sock *sk, int space)
+ {
+- int tcp_adv_win_scale = sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale;
++ int tcp_adv_win_scale = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale);
+
+ return tcp_adv_win_scale <= 0 ?
+ (space>>(-tcp_adv_win_scale)) :
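
Note: this tcp.h hunk and the tcp/igmp hunks further down are all instances of one fix — sysctl knobs are plain ints that a writer may change at any time through /proc, so lockless readers must load them exactly once via READ_ONCE() to avoid torn or repeated reads. Schematically, with a stand-in struct (not a real netns type):

	#include <linux/compiler.h>

	struct example_cfg {
		int sysctl_knob;	/* stand-in for a netns sysctl field */
	};

	/* Writer (the sysctl handler) publishes the new value once. */
	static void knob_store(struct example_cfg *c, int val)
	{
		WRITE_ONCE(c->sysctl_knob, val);
	}

	/* Reader takes a single snapshot; a plain load could legally
	 * be repeated by the compiler, so two uses in one function
	 * might otherwise observe two different values.
	 */
	static int knob_load(const struct example_cfg *c)
	{
		return READ_ONCE(c->sysctl_knob);
	}
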
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 959a16b133033..286fca6a9ab2a 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -110,7 +110,8 @@ static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
+ }
+
+ /* Find channel with given SCID.
+- * Returns locked channel. */
++ * Returns a reference locked channel.
++ */
+ static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
+ u16 cid)
+ {
+@@ -118,15 +119,19 @@ static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
+
+ mutex_lock(&conn->chan_lock);
+ c = __l2cap_get_chan_by_scid(conn, cid);
+- if (c)
+- l2cap_chan_lock(c);
++ if (c) {
++ /* Only lock if chan reference is not 0 */
++ c = l2cap_chan_hold_unless_zero(c);
++ if (c)
++ l2cap_chan_lock(c);
++ }
+ mutex_unlock(&conn->chan_lock);
+
+ return c;
+ }
+
+ /* Find channel with given DCID.
+- * Returns locked channel.
++ * Returns a reference locked channel.
+ */
+ static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
+ u16 cid)
+@@ -135,8 +140,12 @@ static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
+
+ mutex_lock(&conn->chan_lock);
+ c = __l2cap_get_chan_by_dcid(conn, cid);
+- if (c)
+- l2cap_chan_lock(c);
++ if (c) {
++ /* Only lock if chan reference is not 0 */
++ c = l2cap_chan_hold_unless_zero(c);
++ if (c)
++ l2cap_chan_lock(c);
++ }
+ mutex_unlock(&conn->chan_lock);
+
+ return c;
+@@ -161,8 +170,12 @@ static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
+
+ mutex_lock(&conn->chan_lock);
+ c = __l2cap_get_chan_by_ident(conn, ident);
+- if (c)
+- l2cap_chan_lock(c);
++ if (c) {
++ /* Only lock if chan reference is not 0 */
++ c = l2cap_chan_hold_unless_zero(c);
++ if (c)
++ l2cap_chan_lock(c);
++ }
+ mutex_unlock(&conn->chan_lock);
+
+ return c;
+@@ -496,6 +509,16 @@ void l2cap_chan_hold(struct l2cap_chan *c)
+ kref_get(&c->kref);
+ }
+
++struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
++{
++ BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
++
++ if (!kref_get_unless_zero(&c->kref))
++ return NULL;
++
++ return c;
++}
++
+ void l2cap_chan_put(struct l2cap_chan *c)
+ {
+ BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
+@@ -1812,7 +1835,10 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
+ src_match = !bacmp(&c->src, src);
+ dst_match = !bacmp(&c->dst, dst);
+ if (src_match && dst_match) {
+- l2cap_chan_hold(c);
++ c = l2cap_chan_hold_unless_zero(c);
++ if (!c)
++ continue;
++
+ read_unlock(&chan_list_lock);
+ return c;
+ }
+@@ -1827,7 +1853,7 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
+ }
+
+ if (c1)
+- l2cap_chan_hold(c1);
++ c1 = l2cap_chan_hold_unless_zero(c1);
+
+ read_unlock(&chan_list_lock);
+
+@@ -4221,6 +4247,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
+
+ unlock:
+ l2cap_chan_unlock(chan);
++ l2cap_chan_put(chan);
+ return err;
+ }
+
+@@ -4334,6 +4361,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn,
+
+ done:
+ l2cap_chan_unlock(chan);
++ l2cap_chan_put(chan);
+ return err;
+ }
+
+@@ -5062,6 +5090,7 @@ send_move_response:
+ l2cap_send_move_chan_rsp(chan, result);
+
+ l2cap_chan_unlock(chan);
++ l2cap_chan_put(chan);
+
+ return 0;
+ }
+@@ -5154,6 +5183,7 @@ static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
+ }
+
+ l2cap_chan_unlock(chan);
++ l2cap_chan_put(chan);
+ }
+
+ static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
+@@ -5183,6 +5213,7 @@ static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
+ l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
+
+ l2cap_chan_unlock(chan);
++ l2cap_chan_put(chan);
+ }
+
+ static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
+@@ -5246,6 +5277,7 @@ static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
+ l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
+
+ l2cap_chan_unlock(chan);
++ l2cap_chan_put(chan);
+
+ return 0;
+ }
+@@ -5281,6 +5313,7 @@ static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
+ }
+
+ l2cap_chan_unlock(chan);
++ l2cap_chan_put(chan);
+
+ return 0;
+ }
+@@ -5653,12 +5686,11 @@ static inline int l2cap_le_credits(struct l2cap_conn *conn,
+ if (credits > max_credits) {
+ BT_ERR("LE credits overflow");
+ l2cap_send_disconn_req(chan, ECONNRESET);
+- l2cap_chan_unlock(chan);
+
+ /* Return 0 so that we don't trigger an unnecessary
+ * command reject packet.
+ */
+- return 0;
++ goto unlock;
+ }
+
+ chan->tx_credits += credits;
+@@ -5669,7 +5701,9 @@ static inline int l2cap_le_credits(struct l2cap_conn *conn,
+ if (chan->tx_credits)
+ chan->ops->resume(chan);
+
++unlock:
+ l2cap_chan_unlock(chan);
++ l2cap_chan_put(chan);
+
+ return 0;
+ }
+@@ -6983,6 +7017,7 @@ drop:
+
+ done:
+ l2cap_chan_unlock(chan);
++ l2cap_chan_put(chan);
+ }
+
+ static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
+@@ -7386,7 +7421,7 @@ static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
+ if (src_type != c->src_type)
+ continue;
+
+- l2cap_chan_hold(c);
++ c = l2cap_chan_hold_unless_zero(c);
+ read_unlock(&chan_list_lock);
+ return c;
+ }
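
Note on the l2cap_core changes above: every lookup now takes its hold with l2cap_chan_hold_unless_zero(), so a channel whose refcount has already reached zero is treated as not found, and each function that takes a hold drops it with l2cap_chan_put() on its exit paths. The underlying idiom is kref_get_unless_zero(); a generic sketch:

	#include <linux/kernel.h>
	#include <linux/kref.h>
	#include <linux/slab.h>

	struct obj {
		struct kref ref;
		/* ... payload ... */
	};

	static void obj_release(struct kref *ref)
	{
		kfree(container_of(ref, struct obj, ref));
	}

	/* Lookup-side hold: the object may still be reachable through
	 * a list while its final reference drains on another CPU.
	 * kref_get() here would resurrect a dying object; failing the
	 * acquire and reporting "not found" is the safe answer.
	 */
	static struct obj *obj_tryget(struct obj *o)
	{
		return kref_get_unless_zero(&o->ref) ? o : NULL;
	}

	static void obj_put(struct obj *o)
	{
		kref_put(&o->ref, obj_release);
	}
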
+diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
+index 660b41040c771..1023f881091ef 100644
+--- a/net/ipv4/igmp.c
++++ b/net/ipv4/igmp.c
+@@ -829,7 +829,7 @@ static void igmp_ifc_event(struct in_device *in_dev)
+ struct net *net = dev_net(in_dev->dev);
+ if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev))
+ return;
+- WRITE_ONCE(in_dev->mr_ifc_count, in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv);
++ WRITE_ONCE(in_dev->mr_ifc_count, in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv));
+ igmp_ifc_start_timer(in_dev, 1);
+ }
+
+@@ -1011,7 +1011,7 @@ static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
+ * received value was zero, use the default or statically
+ * configured value.
+ */
+- in_dev->mr_qrv = ih3->qrv ?: net->ipv4.sysctl_igmp_qrv;
++ in_dev->mr_qrv = ih3->qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
+ in_dev->mr_qi = IGMPV3_QQIC(ih3->qqic)*HZ ?: IGMP_QUERY_INTERVAL;
+
+ /* RFC3376, 8.3. Query Response Interval:
+@@ -1191,7 +1191,7 @@ static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im,
+ pmc->interface = im->interface;
+ in_dev_hold(in_dev);
+ pmc->multiaddr = im->multiaddr;
+- pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
++ pmc->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
+ pmc->sfmode = im->sfmode;
+ if (pmc->sfmode == MCAST_INCLUDE) {
+ struct ip_sf_list *psf;
+@@ -1242,9 +1242,11 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
+ swap(im->tomb, pmc->tomb);
+ swap(im->sources, pmc->sources);
+ for (psf = im->sources; psf; psf = psf->sf_next)
+- psf->sf_crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
++ psf->sf_crcount = in_dev->mr_qrv ?:
++ READ_ONCE(net->ipv4.sysctl_igmp_qrv);
+ } else {
+- im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
++ im->crcount = in_dev->mr_qrv ?:
++ READ_ONCE(net->ipv4.sysctl_igmp_qrv);
+ }
+ in_dev_put(pmc->interface);
+ kfree_pmc(pmc);
+@@ -1351,7 +1353,7 @@ static void igmp_group_added(struct ip_mc_list *im)
+ if (in_dev->dead)
+ return;
+
+- im->unsolicit_count = net->ipv4.sysctl_igmp_qrv;
++ im->unsolicit_count = READ_ONCE(net->ipv4.sysctl_igmp_qrv);
+ if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) {
+ spin_lock_bh(&im->lock);
+ igmp_start_timer(im, IGMP_INITIAL_REPORT_DELAY);
+@@ -1365,7 +1367,7 @@ static void igmp_group_added(struct ip_mc_list *im)
+ * IN() to IN(A).
+ */
+ if (im->sfmode == MCAST_EXCLUDE)
+- im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
++ im->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
+
+ igmp_ifc_event(in_dev);
+ #endif
+@@ -1756,7 +1758,7 @@ static void ip_mc_reset(struct in_device *in_dev)
+
+ in_dev->mr_qi = IGMP_QUERY_INTERVAL;
+ in_dev->mr_qri = IGMP_QUERY_RESPONSE_INTERVAL;
+- in_dev->mr_qrv = net->ipv4.sysctl_igmp_qrv;
++ in_dev->mr_qrv = READ_ONCE(net->ipv4.sysctl_igmp_qrv);
+ }
+ #else
+ static void ip_mc_reset(struct in_device *in_dev)
+@@ -1890,7 +1892,7 @@ static int ip_mc_del1_src(struct ip_mc_list *pmc, int sfmode,
+ #ifdef CONFIG_IP_MULTICAST
+ if (psf->sf_oldin &&
+ !IGMP_V1_SEEN(in_dev) && !IGMP_V2_SEEN(in_dev)) {
+- psf->sf_crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
++ psf->sf_crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
+ psf->sf_next = pmc->tomb;
+ pmc->tomb = psf;
+ rv = 1;
+@@ -1954,7 +1956,7 @@ static int ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
+ /* filter mode change */
+ pmc->sfmode = MCAST_INCLUDE;
+ #ifdef CONFIG_IP_MULTICAST
+- pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
++ pmc->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
+ WRITE_ONCE(in_dev->mr_ifc_count, pmc->crcount);
+ for (psf = pmc->sources; psf; psf = psf->sf_next)
+ psf->sf_crcount = 0;
+@@ -2133,7 +2135,7 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
+ #ifdef CONFIG_IP_MULTICAST
+ /* else no filters; keep old mode for reports */
+
+- pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
++ pmc->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
+ WRITE_ONCE(in_dev->mr_ifc_count, pmc->crcount);
+ for (psf = pmc->sources; psf; psf = psf->sf_next)
+ psf->sf_crcount = 0;
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 4b31f6e9ec61f..0a570d5d0b38f 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -697,7 +697,7 @@ static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
+ int size_goal)
+ {
+ return skb->len < size_goal &&
+- sock_net(sk)->ipv4.sysctl_tcp_autocorking &&
++ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_autocorking) &&
+ !tcp_rtx_queue_empty(sk) &&
+ refcount_read(&sk->sk_wmem_alloc) > skb->truesize;
+ }
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index c151c4dd4ae63..f4e00ff909da3 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -439,7 +439,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
+ */
+ void tcp_init_buffer_space(struct sock *sk)
+ {
+- int tcp_app_win = sock_net(sk)->ipv4.sysctl_tcp_app_win;
++ int tcp_app_win = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_app_win);
+ struct tcp_sock *tp = tcp_sk(sk);
+ int maxwin;
+
+@@ -2030,7 +2030,7 @@ void tcp_enter_loss(struct sock *sk)
+ * loss recovery is underway except recurring timeout(s) on
+ * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing
+ */
+- tp->frto = net->ipv4.sysctl_tcp_frto &&
++ tp->frto = READ_ONCE(net->ipv4.sysctl_tcp_frto) &&
+ (new_recovery || icsk->icsk_retransmits) &&
+ !inet_csk(sk)->icsk_mtup.probe_size;
+ }
+@@ -2914,7 +2914,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
+
+ static void tcp_update_rtt_min(struct sock *sk, u32 rtt_us, const int flag)
+ {
+- u32 wlen = sock_net(sk)->ipv4.sysctl_tcp_min_rtt_wlen * HZ;
++ u32 wlen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_rtt_wlen) * HZ;
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if ((flag & FLAG_ACK_MAYBE_DELAYED) && rtt_us > tcp_min_rtt(tp)) {
+@@ -3436,7 +3436,8 @@ static bool __tcp_oow_rate_limited(struct net *net, int mib_idx,
+ if (*last_oow_ack_time) {
+ s32 elapsed = (s32)(tcp_jiffies32 - *last_oow_ack_time);
+
+- if (0 <= elapsed && elapsed < net->ipv4.sysctl_tcp_invalid_ratelimit) {
++ if (0 <= elapsed &&
++ elapsed < READ_ONCE(net->ipv4.sysctl_tcp_invalid_ratelimit)) {
+ NET_INC_STATS(net, mib_idx);
+ return true; /* rate-limited: don't send yet! */
+ }
+@@ -3484,7 +3485,7 @@ static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
+ /* Then check host-wide RFC 5961 rate limit. */
+ now = jiffies / HZ;
+ if (now != challenge_timestamp) {
+- u32 ack_limit = net->ipv4.sysctl_tcp_challenge_ack_limit;
++ u32 ack_limit = READ_ONCE(net->ipv4.sysctl_tcp_challenge_ack_limit);
+ u32 half = (ack_limit + 1) >> 1;
+
+ challenge_timestamp = now;
+@@ -4260,7 +4261,7 @@ static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
+ {
+ struct tcp_sock *tp = tcp_sk(sk);
+
+- if (tcp_is_sack(tp) && sock_net(sk)->ipv4.sysctl_tcp_dsack) {
++ if (tcp_is_sack(tp) && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_dsack)) {
+ int mib_idx;
+
+ if (before(seq, tp->rcv_nxt))
+@@ -4306,7 +4307,7 @@ static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
+ tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
+
+- if (tcp_is_sack(tp) && sock_net(sk)->ipv4.sysctl_tcp_dsack) {
++ if (tcp_is_sack(tp) && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_dsack)) {
+ u32 end_seq = TCP_SKB_CB(skb)->end_seq;
+
+ tcp_rcv_spurious_retrans(sk, skb);
+@@ -5302,7 +5303,7 @@ send_now:
+ }
+
+ if (!tcp_is_sack(tp) ||
+- tp->compressed_ack >= sock_net(sk)->ipv4.sysctl_tcp_comp_sack_nr)
++ tp->compressed_ack >= READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_comp_sack_nr))
+ goto send_now;
+
+ if (tp->compressed_ack_rcv_nxt != tp->rcv_nxt) {
+@@ -5325,7 +5326,8 @@ send_now:
+ if (tp->srtt_us && tp->srtt_us < rtt)
+ rtt = tp->srtt_us;
+
+- delay = min_t(unsigned long, sock_net(sk)->ipv4.sysctl_tcp_comp_sack_delay_ns,
++ delay = min_t(unsigned long,
++ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_comp_sack_delay_ns),
+ rtt * (NSEC_PER_USEC >> 3)/20);
+ sock_hold(sk);
+ hrtimer_start(&tp->compressed_ack_timer, ns_to_ktime(delay),
+diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
+index 9a7d8a5998578..0af6249a993af 100644
+--- a/net/ipv4/tcp_metrics.c
++++ b/net/ipv4/tcp_metrics.c
+@@ -329,7 +329,7 @@ void tcp_update_metrics(struct sock *sk)
+ int m;
+
+ sk_dst_confirm(sk);
+- if (net->ipv4.sysctl_tcp_nometrics_save || !dst)
++ if (READ_ONCE(net->ipv4.sysctl_tcp_nometrics_save) || !dst)
+ return;
+
+ rcu_read_lock();
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 97f29ece38000..ef749a47768a5 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1761,7 +1761,7 @@ static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
+
+ min_tso = ca_ops->min_tso_segs ?
+ ca_ops->min_tso_segs(sk) :
+- sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs;
++ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs);
+
+ tso_segs = tcp_tso_autosize(sk, mss_now, min_tso);
+ return min_t(u32, tso_segs, sk->sk_gso_max_segs);
+@@ -2276,7 +2276,7 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
+ sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift));
+ if (sk->sk_pacing_status == SK_PACING_NONE)
+ limit = min_t(unsigned long, limit,
+- sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes);
++ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes));
+ limit <<= factor;
+
+ if (static_branch_unlikely(&tcp_tx_delay_enabled) &&
+diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
+index 98ac32b49d8c9..051bbd0726dff 100644
+--- a/net/ipv6/ping.c
++++ b/net/ipv6/ping.c
+@@ -22,6 +22,11 @@
+ #include <linux/proc_fs.h>
+ #include <net/ping.h>
+
++static void ping_v6_destroy(struct sock *sk)
++{
++ inet6_destroy_sock(sk);
++}
++
+ /* Compatibility glue so we can support IPv6 when it's compiled as a module */
+ static int dummy_ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
+ int *addr_len)
+@@ -165,6 +170,7 @@ struct proto pingv6_prot = {
+ .owner = THIS_MODULE,
+ .init = ping_init_sock,
+ .close = ping_close,
++ .destroy = ping_v6_destroy,
+ .connect = ip6_datagram_connect_v6_only,
+ .disconnect = __udp_disconnect,
+ .setsockopt = ipv6_setsockopt,
+diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
+index 7d3ab08a5a2d0..581bd1353a447 100644
+--- a/net/netfilter/nfnetlink_queue.c
++++ b/net/netfilter/nfnetlink_queue.c
+@@ -846,11 +846,16 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
+ }
+
+ static int
+-nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e, int diff)
++nfqnl_mangle(void *data, unsigned int data_len, struct nf_queue_entry *e, int diff)
+ {
+ struct sk_buff *nskb;
+
+ if (diff < 0) {
++ unsigned int min_len = skb_transport_offset(e->skb);
++
++ if (data_len < min_len)
++ return -EINVAL;
++
+ if (pskb_trim(e->skb, data_len))
+ return -ENOMEM;
+ } else if (diff > 0) {
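
Note on the nfqnl_mangle() hunk above: data_len arrives from a userspace verdict message, so the packet must never be trimmed above its own transport header; the signature also changes to unsigned int so the comparison cannot be defeated by a negative length. Reduced to its core (hypothetical helper name):

	#include <linux/errno.h>
	#include <linux/skbuff.h>

	/* Reject userspace-supplied lengths that would cut the skb
	 * above the transport header; later protocol handlers would
	 * otherwise read past the truncated data.
	 */
	static int shrink_payload(struct sk_buff *skb, unsigned int data_len)
	{
		unsigned int min_len = skb_transport_offset(skb);

		if (data_len < min_len)
			return -EINVAL;
		return pskb_trim(skb, data_len);	/* 0 or -ENOMEM */
	}
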
+diff --git a/net/sctp/associola.c b/net/sctp/associola.c
+index fb6f62264e874..f960b0e1e552c 100644
+--- a/net/sctp/associola.c
++++ b/net/sctp/associola.c
+@@ -224,9 +224,8 @@ static struct sctp_association *sctp_association_init(
+ if (!sctp_ulpq_init(&asoc->ulpq, asoc))
+ goto fail_init;
+
+- if (sctp_stream_init(&asoc->stream, asoc->c.sinit_num_ostreams,
+- 0, gfp))
+- goto fail_init;
++ if (sctp_stream_init(&asoc->stream, asoc->c.sinit_num_ostreams, 0, gfp))
++ goto stream_free;
+
+ /* Initialize default path MTU. */
+ asoc->pathmtu = sp->pathmtu;
+diff --git a/net/sctp/stream.c b/net/sctp/stream.c
+index cd20638b61514..56762745d6e4e 100644
+--- a/net/sctp/stream.c
++++ b/net/sctp/stream.c
+@@ -137,7 +137,7 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
+
+ ret = sctp_stream_alloc_out(stream, outcnt, gfp);
+ if (ret)
+- goto out_err;
++ return ret;
+
+ for (i = 0; i < stream->outcnt; i++)
+ SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
+@@ -145,22 +145,9 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
+ handle_in:
+ sctp_stream_interleave_init(stream);
+ if (!incnt)
+- goto out;
+-
+- ret = sctp_stream_alloc_in(stream, incnt, gfp);
+- if (ret)
+- goto in_err;
+-
+- goto out;
++ return 0;
+
+-in_err:
+- sched->free(stream);
+- genradix_free(&stream->in);
+-out_err:
+- genradix_free(&stream->out);
+- stream->outcnt = 0;
+-out:
+- return ret;
++ return sctp_stream_alloc_in(stream, incnt, gfp);
+ }
+
+ int sctp_stream_init_ext(struct sctp_stream *stream, __u16 sid)
+diff --git a/net/sctp/stream_sched.c b/net/sctp/stream_sched.c
+index 99e5f69fbb742..a2e1d34f52c5b 100644
+--- a/net/sctp/stream_sched.c
++++ b/net/sctp/stream_sched.c
+@@ -163,7 +163,7 @@ int sctp_sched_set_sched(struct sctp_association *asoc,
+ if (!SCTP_SO(&asoc->stream, i)->ext)
+ continue;
+
+- ret = n->init_sid(&asoc->stream, i, GFP_KERNEL);
++ ret = n->init_sid(&asoc->stream, i, GFP_ATOMIC);
+ if (ret)
+ goto err;
+ }
+diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
+index 2ec0a32da5793..0b185b1090ff3 100644
+--- a/tools/perf/util/symbol-elf.c
++++ b/tools/perf/util/symbol-elf.c
+@@ -230,6 +230,33 @@ Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
+ return NULL;
+ }
+
++static int elf_read_program_header(Elf *elf, u64 vaddr, GElf_Phdr *phdr)
++{
++ size_t i, phdrnum;
++ u64 sz;
++
++ if (elf_getphdrnum(elf, &phdrnum))
++ return -1;
++
++ for (i = 0; i < phdrnum; i++) {
++ if (gelf_getphdr(elf, i, phdr) == NULL)
++ return -1;
++
++ if (phdr->p_type != PT_LOAD)
++ continue;
++
++ sz = max(phdr->p_memsz, phdr->p_filesz);
++ if (!sz)
++ continue;
++
++ if (vaddr >= phdr->p_vaddr && (vaddr < phdr->p_vaddr + sz))
++ return 0;
++ }
++
++ /* Not found any valid program header */
++ return -1;
++}
++
+ static bool want_demangle(bool is_kernel_sym)
+ {
+ return is_kernel_sym ? symbol_conf.demangle_kernel : symbol_conf.demangle;
+@@ -1091,6 +1118,7 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
+ sym.st_value);
+ used_opd = true;
+ }
++
+ /*
+ * When loading symbols in a data mapping, ABS symbols (which
+ * has a value of SHN_ABS in its st_shndx) failed at
+@@ -1127,11 +1155,20 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
+ goto out_elf_end;
+ } else if ((used_opd && runtime_ss->adjust_symbols) ||
+ (!used_opd && syms_ss->adjust_symbols)) {
++ GElf_Phdr phdr;
++
++ if (elf_read_program_header(syms_ss->elf,
++ (u64)sym.st_value, &phdr)) {
++ pr_warning("%s: failed to find program header for "
++ "symbol: %s st_value: %#" PRIx64 "\n",
++ __func__, elf_name, (u64)sym.st_value);
++ continue;
++ }
+ pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
+- "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__,
+- (u64)sym.st_value, (u64)shdr.sh_addr,
+- (u64)shdr.sh_offset);
+- sym.st_value -= shdr.sh_addr - shdr.sh_offset;
++ "p_vaddr: %#" PRIx64 " p_offset: %#" PRIx64 "\n",
++ __func__, (u64)sym.st_value, (u64)phdr.p_vaddr,
++ (u64)phdr.p_offset);
++ sym.st_value -= phdr.p_vaddr - phdr.p_offset;
+ }
+
+ demangled = demangle_sym(dso, kmodule, elf_name);
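
Closing note on the perf hunk: symbol adjustment now goes through the covering PT_LOAD program header rather than section headers, since p_vaddr/p_offset are what actually define the load bias. A hypothetical standalone use of the same libelf calls:

	#include <gelf.h>

	/* Map a loaded virtual address to its file offset through the
	 * PT_LOAD segment covering it; -1 when no segment matches.
	 * Mirrors the lookup the patch adds to dso__load_sym().
	 */
	static long vaddr_to_file_offset(Elf *elf, GElf_Addr vaddr)
	{
		GElf_Phdr phdr;
		size_t i, n;

		if (elf_getphdrnum(elf, &n))
			return -1;
		for (i = 0; i < n; i++) {
			GElf_Xword sz;

			if (gelf_getphdr(elf, (int)i, &phdr) == NULL)
				return -1;
			if (phdr.p_type != PT_LOAD)
				continue;
			sz = phdr.p_memsz > phdr.p_filesz ? phdr.p_memsz
							  : phdr.p_filesz;
			if (sz && vaddr >= phdr.p_vaddr && vaddr < phdr.p_vaddr + sz)
				return (long)(vaddr - phdr.p_vaddr + phdr.p_offset);
		}
		return -1;
	}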