Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: /
Date: Sun, 17 Oct 2021 13:15:07
Message-Id: 1634476491.b231c41446a67f5ff5ecec615eccf7cfb454f403.mpagano@gentoo
commit: b231c41446a67f5ff5ecec615eccf7cfb454f403
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Oct 17 13:14:51 2021 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Oct 17 13:14:51 2021 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b231c414

Linux patch 4.4.289

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1288_linux-4.4.289.patch | 484 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 488 insertions(+)

diff --git a/0000_README b/0000_README
index 946f1ec..a63694d 100644
--- a/0000_README
+++ b/0000_README
@@ -1195,6 +1195,10 @@ Patch: 1287_linux-4.4.288.patch
From: http://www.kernel.org
Desc: Linux 4.4.288

+Patch: 1288_linux-4.4.289.patch
+From: http://www.kernel.org
+Desc: Linux 4.4.289
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1288_linux-4.4.289.patch b/1288_linux-4.4.289.patch
new file mode 100644
index 0000000..7f84e30
--- /dev/null
+++ b/1288_linux-4.4.289.patch
@@ -0,0 +1,484 @@
+diff --git a/Makefile b/Makefile
+index 823d7d08088c5..84e759c8461ce 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 288
++SUBLEVEL = 289
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+
+diff --git a/arch/arm/mach-imx/pm-imx6.c b/arch/arm/mach-imx/pm-imx6.c
+index fff529c5f9b36..f2dcbe14cb678 100644
+--- a/arch/arm/mach-imx/pm-imx6.c
++++ b/arch/arm/mach-imx/pm-imx6.c
+@@ -15,6 +15,7 @@
+ #include <linux/io.h>
+ #include <linux/irq.h>
+ #include <linux/genalloc.h>
++#include <linux/irqchip/arm-gic.h>
+ #include <linux/mfd/syscon.h>
+ #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+ #include <linux/of.h>
+@@ -604,6 +605,7 @@ static void __init imx6_pm_common_init(const struct imx6_pm_socdata
+
+ static void imx6_pm_stby_poweroff(void)
+ {
++ gic_cpu_if_down(0);
+ imx6_set_lpm(STOP_POWER_OFF);
+ imx6q_suspend_finish(0);
+
+diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
+index b983d3dc4e6c6..851fbdb99767a 100644
+--- a/arch/x86/kernel/cpu/perf_event.c
++++ b/arch/x86/kernel/cpu/perf_event.c
+@@ -2001,6 +2001,7 @@ static int x86_pmu_event_init(struct perf_event *event)
+ if (err) {
+ if (event->destroy)
+ event->destroy(event);
++ event->destroy = NULL;
+ }
+
+ if (ACCESS_ONCE(x86_pmu.attr_rdpmc))
+diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
+index 441694464b1e4..fbbc24b914e30 100644
+--- a/arch/xtensa/kernel/irq.c
++++ b/arch/xtensa/kernel/irq.c
+@@ -144,7 +144,7 @@ unsigned xtensa_get_ext_irq_no(unsigned irq)
+
+ void __init init_IRQ(void)
+ {
+-#ifdef CONFIG_OF
++#ifdef CONFIG_USE_OF
+ irqchip_init();
+ #else
+ #ifdef CONFIG_HAVE_SMP
+diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
+index 8af87dc05f2a5..73289b013dee0 100644
+--- a/drivers/hid/hid-apple.c
++++ b/drivers/hid/hid-apple.c
+@@ -301,12 +301,19 @@ static int apple_event(struct hid_device *hdev, struct hid_field *field,
+
+ /*
+ * MacBook JIS keyboard has wrong logical maximum
++ * Magic Keyboard JIS has wrong logical maximum
+ */
+ static __u8 *apple_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+ {
+ struct apple_sc *asc = hid_get_drvdata(hdev);
+
++ if(*rsize >=71 && rdesc[70] == 0x65 && rdesc[64] == 0x65) {
++ hid_info(hdev,
++ "fixing up Magic Keyboard JIS report descriptor\n");
++ rdesc[64] = rdesc[70] = 0xe7;
++ }
++
+ if ((asc->quirks & APPLE_RDESC_JIS) && *rsize >= 60 &&
+ rdesc[53] == 0x65 && rdesc[59] == 0x65) {
+ hid_info(hdev,
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index d6d4faa5c5424..2137c4e7289e4 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -6574,7 +6574,7 @@ static int i40e_get_capabilities(struct i40e_pf *pf)
+ if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
+ /* retry with a larger buffer */
+ buf_len = data_size;
+- } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
++ } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK || err) {
+ dev_info(&pf->pdev->dev,
+ "capability discovery failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, err),
+diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
+index 5ea86fd57ae6c..4066fb5a935a7 100644
+--- a/drivers/net/phy/mdio_bus.c
++++ b/drivers/net/phy/mdio_bus.c
+@@ -264,6 +264,13 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
+ bus->dev.groups = NULL;
+ dev_set_name(&bus->dev, "%s", bus->id);
+
++ /* We need to set state to MDIOBUS_UNREGISTERED to correctly release
++ * the device in mdiobus_free()
++ *
++ * State will be updated later in this function in case of success
++ */
++ bus->state = MDIOBUS_UNREGISTERED;
++
+ err = device_register(&bus->dev);
+ if (err) {
+ pr_err("mii_bus %s failed to register\n", bus->id);
+diff --git a/drivers/ptp/ptp_pch.c b/drivers/ptp/ptp_pch.c
+index 3aa22ae4d94c0..a911325fc0b4f 100644
+--- a/drivers/ptp/ptp_pch.c
++++ b/drivers/ptp/ptp_pch.c
+@@ -698,6 +698,7 @@ static const struct pci_device_id pch_ieee1588_pcidev_id[] = {
+ },
+ {0}
+ };
++MODULE_DEVICE_TABLE(pci, pch_ieee1588_pcidev_id);
+
+ static struct pci_driver pch_driver = {
+ .name = KBUILD_MODNAME,
+diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
+index 01168acc864de..1aed965c33a3f 100644
+--- a/drivers/scsi/ses.c
++++ b/drivers/scsi/ses.c
+@@ -118,7 +118,7 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code,
+ static int ses_send_diag(struct scsi_device *sdev, int page_code,
+ void *buf, int bufflen)
+ {
+- u32 result;
++ int result;
+
+ unsigned char cmd[] = {
+ SEND_DIAGNOSTIC,
+diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
+index 9237427728ced..58e3f6db9928e 100644
+--- a/drivers/scsi/virtio_scsi.c
++++ b/drivers/scsi/virtio_scsi.c
+@@ -342,7 +342,7 @@ static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
+ }
+ break;
+ default:
+- pr_info("Unsupport virtio scsi event reason %x\n", event->reason);
++ pr_info("Unsupported virtio scsi event reason %x\n", event->reason);
+ }
+ }
+
+@@ -395,7 +395,7 @@ static void virtscsi_handle_event(struct work_struct *work)
+ virtscsi_handle_param_change(vscsi, event);
+ break;
+ default:
+- pr_err("Unsupport virtio scsi event %x\n", event->event);
++ pr_err("Unsupported virtio scsi event %x\n", event->event);
+ }
+ virtscsi_kick_event(vscsi, event_node);
+ }
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 44184cc6585e6..d869f37b1d23e 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -348,6 +348,9 @@ static void acm_ctrl_irq(struct urb *urb)
+ acm->iocount.overrun++;
+ spin_unlock(&acm->read_lock);
+
++ if (newctrl & ACM_CTRL_BRK)
++ tty_flip_buffer_push(&acm->port);
++
+ if (difference)
+ wake_up_all(&acm->wioctl);
+
+@@ -407,11 +410,16 @@ static int acm_submit_read_urbs(struct acm *acm, gfp_t mem_flags)
+
+ static void acm_process_read_urb(struct acm *acm, struct urb *urb)
+ {
++ unsigned long flags;
++
+ if (!urb->actual_length)
+ return;
+
++ spin_lock_irqsave(&acm->read_lock, flags);
+ tty_insert_flip_string(&acm->port, urb->transfer_buffer,
+ urb->actual_length);
++ spin_unlock_irqrestore(&acm->read_lock, flags);
++
+ tty_flip_buffer_push(&acm->port);
+ }
+
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index ee0da259a3d3b..87708608c0ffd 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -2988,15 +2988,18 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen,
+ goto fail;
+ cd->rd_maxcount -= entry_bytes;
+ /*
+- * RFC 3530 14.2.24 describes rd_dircount as only a "hint", so
+- * let's always let through the first entry, at least:
++ * RFC 3530 14.2.24 describes rd_dircount as only a "hint", and
++ * notes that it could be zero. If it is zero, then the server
++ * should enforce only the rd_maxcount value.
+ */
+- if (!cd->rd_dircount)
+- goto fail;
+- name_and_cookie = 4 + 4 * XDR_QUADLEN(namlen) + 8;
+- if (name_and_cookie > cd->rd_dircount && cd->cookie_offset)
+- goto fail;
+- cd->rd_dircount -= min(cd->rd_dircount, name_and_cookie);
++ if (cd->rd_dircount) {
++ name_and_cookie = 4 + 4 * XDR_QUADLEN(namlen) + 8;
++ if (name_and_cookie > cd->rd_dircount && cd->cookie_offset)
++ goto fail;
++ cd->rd_dircount -= min(cd->rd_dircount, name_and_cookie);
++ if (!cd->rd_dircount)
++ cd->rd_maxcount = 0;
++ }
+
+ cd->cookie_offset = cookie_offset;
+ skip_entry:
+diff --git a/mm/gup.c b/mm/gup.c
+index 4c5857889e9d0..c80cdc4082280 100644
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -59,13 +59,22 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
+ }
+
+ /*
+- * FOLL_FORCE can write to even unwritable pte's, but only
+- * after we've gone through a COW cycle and they are dirty.
++ * FOLL_FORCE or a forced COW break can write even to unwritable pte's,
++ * but only after we've gone through a COW cycle and they are dirty.
+ */
+ static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
+ {
+- return pte_write(pte) ||
+- ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
++ return pte_write(pte) || ((flags & FOLL_COW) && pte_dirty(pte));
++}
++
++/*
++ * A (separate) COW fault might break the page the other way and
++ * get_user_pages() would return the page from what is now the wrong
++ * VM. So we need to force a COW break at GUP time even for reads.
++ */
++static inline bool should_force_cow_break(struct vm_area_struct *vma, unsigned int flags)
++{
++ return is_cow_mapping(vma->vm_flags) && (flags & FOLL_GET);
+ }
+
+ static struct page *follow_page_pte(struct vm_area_struct *vma,
+@@ -509,12 +518,18 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+ if (!vma || check_vma_flags(vma, gup_flags))
+ return i ? : -EFAULT;
+ if (is_vm_hugetlb_page(vma)) {
++ if (should_force_cow_break(vma, foll_flags))
++ foll_flags |= FOLL_WRITE;
+ i = follow_hugetlb_page(mm, vma, pages, vmas,
+ &start, &nr_pages, i,
+- gup_flags);
++ foll_flags);
+ continue;
+ }
+ }
++
++ if (should_force_cow_break(vma, foll_flags))
++ foll_flags |= FOLL_WRITE;
++
+ retry:
+ /*
+ * If we have a pending SIGKILL, don't keep faulting pages and
+@@ -1346,6 +1361,10 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
+ /*
+ * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
+ * the regular GUP. It will only return non-negative values.
++ *
++ * Careful, careful! COW breaking can go either way, so a non-write
++ * access can get ambiguous page results. If you call this function without
++ * 'write' set, you'd better be sure that you're ok with that ambiguity.
+ */
+ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ struct page **pages)
+@@ -1375,6 +1394,12 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ *
+ * We do not adopt an rcu_read_lock(.) here as we also want to
+ * block IPIs that come from THPs splitting.
++ *
++ * NOTE! We allow read-only gup_fast() here, but you'd better be
++ * careful about possible COW pages. You'll get _a_ COW page, but
++ * not necessarily the one you intended to get depending on what
++ * COW event happens after this. COW may break the page copy in a
++ * random direction.
+ */
+
+ local_irq_save(flags);
+@@ -1385,15 +1410,22 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ next = pgd_addr_end(addr, end);
+ if (pgd_none(pgd))
+ break;
++ /*
++ * The FAST_GUP case requires FOLL_WRITE even for pure reads,
++ * because get_user_pages() may need to cause an early COW in
++ * order to avoid confusing the normal COW routines. So only
++ * targets that are already writable are safe to do by just
++ * looking at the page tables.
++ */
+ if (unlikely(pgd_huge(pgd))) {
+- if (!gup_huge_pgd(pgd, pgdp, addr, next, write,
++ if (!gup_huge_pgd(pgd, pgdp, addr, next, 1,
+ pages, &nr))
+ break;
+ } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
+ if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
+- PGDIR_SHIFT, next, write, pages, &nr))
++ PGDIR_SHIFT, next, 1, pages, &nr))
+ break;
+- } else if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
++ } else if (!gup_pud_range(pgd, addr, next, 1, pages, &nr))
+ break;
+ } while (pgdp++, addr = next, addr != end);
+ local_irq_restore(flags);
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 6404e4fcb4ed6..2f53786098c5f 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1268,13 +1268,12 @@ out_unlock:
+ }
+
+ /*
+- * FOLL_FORCE can write to even unwritable pmd's, but only
+- * after we've gone through a COW cycle and they are dirty.
++ * FOLL_FORCE or a forced COW break can write even to unwritable pmd's,
++ * but only after we've gone through a COW cycle and they are dirty.
+ */
+ static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
+ {
+- return pmd_write(pmd) ||
+- ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
++ return pmd_write(pmd) || ((flags & FOLL_COW) && pmd_dirty(pmd));
+ }
+
+ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
+@@ -1341,9 +1340,6 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ bool was_writable;
+ int flags = 0;
+
+- /* A PROT_NONE fault should not end up here */
+- BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)));
+-
+ ptl = pmd_lock(mm, pmdp);
+ if (unlikely(!pmd_same(pmd, *pmdp)))
+ goto out_unlock;
+diff --git a/mm/memory.c b/mm/memory.c
+index 360d28224a8e2..6bfc6a021c4f8 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -3209,9 +3209,6 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ bool was_writable = pte_write(pte);
+ int flags = 0;
+
+- /* A PROT_NONE fault should not end up here */
+- BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)));
+-
+ /*
+ * The "pte" at this point cannot be used safely without
+ * validation through pte_unmap_same(). It's of NUMA type but
+@@ -3304,6 +3301,11 @@ static int wp_huge_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
+ return VM_FAULT_FALLBACK;
+ }
+
++static inline bool vma_is_accessible(struct vm_area_struct *vma)
++{
++ return vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE);
++}
++
+ /*
+ * These routines also need to handle stuff like marking pages dirty
+ * and/or accessed for architectures that don't do it in hardware (most
+@@ -3350,7 +3352,7 @@ static int handle_pte_fault(struct mm_struct *mm,
+ pte, pmd, flags, entry);
+ }
+
+- if (pte_protnone(entry))
++ if (pte_protnone(entry) && vma_is_accessible(vma))
+ return do_numa_page(mm, vma, address, entry, pte, pmd);
+
+ ptl = pte_lockptr(mm, pmd);
+@@ -3425,7 +3427,7 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ if (pmd_trans_splitting(orig_pmd))
+ return 0;
+
+- if (pmd_protnone(orig_pmd))
++ if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
+ return do_huge_pmd_numa_page(mm, vma, address,
+ orig_pmd, pmd);
+
+diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
+index 3057356cfdff5..43d26625b80ff 100644
+--- a/net/ipv6/netfilter/ip6_tables.c
++++ b/net/ipv6/netfilter/ip6_tables.c
+@@ -339,6 +339,7 @@ ip6t_do_table(struct sk_buff *skb,
+ * things we don't know, ie. tcp syn flag or ports). If the
+ * rule is also a fragment-specific rule, non-fragments won't
+ * match it. */
++ acpar.fragoff = 0;
+ acpar.hotdrop = false;
+ acpar.net = state->net;
+ acpar.in = state->in;
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index b5848bcc09eb3..688d7b5b71395 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -3447,7 +3447,8 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
+ if (!bssid)
+ return false;
+ if (ether_addr_equal(sdata->vif.addr, hdr->addr2) ||
+- ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2))
++ ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2) ||
++ !is_valid_ether_addr(hdr->addr2))
+ return false;
+ if (ieee80211_is_beacon(hdr->frame_control))
+ return true;
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 260cba93a2cfb..65cf129eaad33 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -574,7 +574,10 @@ static int netlink_insert(struct sock *sk, u32 portid)
+
+ /* We need to ensure that the socket is hashed and visible. */
+ smp_wmb();
+- nlk_sk(sk)->bound = portid;
++ /* Paired with lockless reads from netlink_bind(),
++ * netlink_connect() and netlink_sendmsg().
++ */
++ WRITE_ONCE(nlk_sk(sk)->bound, portid);
+
+ err:
+ release_sock(sk);
+@@ -993,7 +996,8 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
+ else if (nlk->ngroups < 8*sizeof(groups))
+ groups &= (1UL << nlk->ngroups) - 1;
+
+- bound = nlk->bound;
++ /* Paired with WRITE_ONCE() in netlink_insert() */
++ bound = READ_ONCE(nlk->bound);
+ if (bound) {
+ /* Ensure nlk->portid is up-to-date. */
+ smp_rmb();
+@@ -1073,8 +1077,9 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
+
+ /* No need for barriers here as we return to user-space without
+ * using any of the bound attributes.
++ * Paired with WRITE_ONCE() in netlink_insert().
+ */
+- if (!nlk->bound)
++ if (!READ_ONCE(nlk->bound))
+ err = netlink_autobind(sock);
+
+ if (err == 0) {
+@@ -1821,7 +1826,8 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ dst_group = nlk->dst_group;
+ }
+
+- if (!nlk->bound) {
++ /* Paired with WRITE_ONCE() in netlink_insert() */
++ if (!READ_ONCE(nlk->bound)) {
+ err = netlink_autobind(sock);
+ if (err)
+ goto out;
+diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
+index 2e4bd2c0a50c4..6c99b833f665c 100644
+--- a/net/sched/sch_fifo.c
++++ b/net/sched/sch_fifo.c
+@@ -151,6 +151,9 @@ int fifo_set_limit(struct Qdisc *q, unsigned int limit)
+ if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
+ return 0;
+
++ if (!q->ops->change)
++ return 0;
++
+ nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
+ if (nla) {
+ nla->nla_type = RTM_NEWQDISC;