From: Alice Ferrazzi <alicef@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.18 commit in: /
Date: Sun, 04 Nov 2018 17:33:22
Message-Id: 1541352780.0a2b0730ed2156923899b026bd016e89fca0ee5e.alicef@gentoo
1 commit: 0a2b0730ed2156923899b026bd016e89fca0ee5e
2 Author: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
3 AuthorDate: Sun Nov 4 17:33:00 2018 +0000
4 Commit: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
5 CommitDate: Sun Nov 4 17:33:00 2018 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=0a2b0730
7
8 linux kernel 4.18.17
9
10 0000_README | 4 +
11 1016_linux-4.18.17.patch | 4982 ++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 4986 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index 52e9ca9..fcd301e 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -107,6 +107,10 @@ Patch: 1015_linux-4.18.16.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.18.16
21
22 +Patch: 1016_linux-4.18.17.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.18.17
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1016_linux-4.18.17.patch b/1016_linux-4.18.17.patch
31 new file mode 100644
32 index 0000000..1e385a1
33 --- /dev/null
34 +++ b/1016_linux-4.18.17.patch
35 @@ -0,0 +1,4982 @@
36 +diff --git a/Makefile b/Makefile
37 +index 034dd990b0ae..c051db0ca5a0 100644
38 +--- a/Makefile
39 ++++ b/Makefile
40 +@@ -1,7 +1,7 @@
41 + # SPDX-License-Identifier: GPL-2.0
42 + VERSION = 4
43 + PATCHLEVEL = 18
44 +-SUBLEVEL = 16
45 ++SUBLEVEL = 17
46 + EXTRAVERSION =
47 + NAME = Merciless Moray
48 +
49 +diff --git a/arch/Kconfig b/arch/Kconfig
50 +index f03b72644902..a18371a36e03 100644
51 +--- a/arch/Kconfig
52 ++++ b/arch/Kconfig
53 +@@ -977,4 +977,12 @@ config REFCOUNT_FULL
54 + against various use-after-free conditions that can be used in
55 + security flaw exploits.
56 +
57 ++config HAVE_ARCH_COMPILER_H
58 ++ bool
59 ++ help
60 ++ An architecture can select this if it provides an
61 ++ asm/compiler.h header that should be included after
62 ++ linux/compiler-*.h in order to override macro definitions that those
63 ++ headers generally provide.
64 ++
65 + source "kernel/gcov/Kconfig"
66 +diff --git a/arch/arm/boot/dts/bcm63138.dtsi b/arch/arm/boot/dts/bcm63138.dtsi
67 +index 43ee992ccdcf..6df61518776f 100644
68 +--- a/arch/arm/boot/dts/bcm63138.dtsi
69 ++++ b/arch/arm/boot/dts/bcm63138.dtsi
70 +@@ -106,21 +106,23 @@
71 + global_timer: timer@1e200 {
72 + compatible = "arm,cortex-a9-global-timer";
73 + reg = <0x1e200 0x20>;
74 +- interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
75 ++ interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
76 + clocks = <&axi_clk>;
77 + };
78 +
79 + local_timer: local-timer@1e600 {
80 + compatible = "arm,cortex-a9-twd-timer";
81 + reg = <0x1e600 0x20>;
82 +- interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
83 ++ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) |
84 ++ IRQ_TYPE_EDGE_RISING)>;
85 + clocks = <&axi_clk>;
86 + };
87 +
88 + twd_watchdog: watchdog@1e620 {
89 + compatible = "arm,cortex-a9-twd-wdt";
90 + reg = <0x1e620 0x20>;
91 +- interrupts = <GIC_PPI 14 IRQ_TYPE_LEVEL_HIGH>;
92 ++ interrupts = <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) |
93 ++ IRQ_TYPE_LEVEL_HIGH)>;
94 + };
95 +
96 + armpll: armpll {
97 +@@ -158,7 +160,7 @@
98 + serial0: serial@600 {
99 + compatible = "brcm,bcm6345-uart";
100 + reg = <0x600 0x1b>;
101 +- interrupts = <GIC_SPI 32 0>;
102 ++ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
103 + clocks = <&periph_clk>;
104 + clock-names = "periph";
105 + status = "disabled";
106 +@@ -167,7 +169,7 @@
107 + serial1: serial@620 {
108 + compatible = "brcm,bcm6345-uart";
109 + reg = <0x620 0x1b>;
110 +- interrupts = <GIC_SPI 33 0>;
111 ++ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
112 + clocks = <&periph_clk>;
113 + clock-names = "periph";
114 + status = "disabled";
115 +@@ -180,7 +182,7 @@
116 + reg = <0x2000 0x600>, <0xf0 0x10>;
117 + reg-names = "nand", "nand-int-base";
118 + status = "disabled";
119 +- interrupts = <GIC_SPI 38 0>;
120 ++ interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
121 + interrupt-names = "nand";
122 + };
123 +
124 +diff --git a/arch/arm/boot/dts/imx53-qsb-common.dtsi b/arch/arm/boot/dts/imx53-qsb-common.dtsi
125 +index ef7658a78836..c1548adee789 100644
126 +--- a/arch/arm/boot/dts/imx53-qsb-common.dtsi
127 ++++ b/arch/arm/boot/dts/imx53-qsb-common.dtsi
128 +@@ -123,6 +123,17 @@
129 + };
130 + };
131 +
132 ++&cpu0 {
133 ++ /* CPU rated to 1GHz, not 1.2GHz as per the default settings */
134 ++ operating-points = <
135 ++ /* kHz uV */
136 ++ 166666 850000
137 ++ 400000 900000
138 ++ 800000 1050000
139 ++ 1000000 1200000
140 ++ >;
141 ++};
142 ++
143 + &esdhc1 {
144 + pinctrl-names = "default";
145 + pinctrl-0 = <&pinctrl_esdhc1>;
146 +diff --git a/arch/arm/kernel/vmlinux.lds.h b/arch/arm/kernel/vmlinux.lds.h
147 +index ae5fdff18406..8247bc15addc 100644
148 +--- a/arch/arm/kernel/vmlinux.lds.h
149 ++++ b/arch/arm/kernel/vmlinux.lds.h
150 +@@ -49,6 +49,8 @@
151 + #define ARM_DISCARD \
152 + *(.ARM.exidx.exit.text) \
153 + *(.ARM.extab.exit.text) \
154 ++ *(.ARM.exidx.text.exit) \
155 ++ *(.ARM.extab.text.exit) \
156 + ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text)) \
157 + ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text)) \
158 + ARM_EXIT_DISCARD(EXIT_TEXT) \
159 +diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
160 +index fc91205ff46c..5bf9443cfbaa 100644
161 +--- a/arch/arm/mm/ioremap.c
162 ++++ b/arch/arm/mm/ioremap.c
163 +@@ -473,7 +473,7 @@ void pci_ioremap_set_mem_type(int mem_type)
164 +
165 + int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
166 + {
167 +- BUG_ON(offset + SZ_64K > IO_SPACE_LIMIT);
168 ++ BUG_ON(offset + SZ_64K - 1 > IO_SPACE_LIMIT);
169 +
170 + return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
171 + PCI_IO_VIRT_BASE + offset + SZ_64K,
172 +diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
173 +index 192b3ba07075..f85be2f8b140 100644
174 +--- a/arch/arm64/mm/hugetlbpage.c
175 ++++ b/arch/arm64/mm/hugetlbpage.c
176 +@@ -117,11 +117,14 @@ static pte_t get_clear_flush(struct mm_struct *mm,
177 +
178 + /*
179 + * If HW_AFDBM is enabled, then the HW could turn on
180 +- * the dirty bit for any page in the set, so check
181 +- * them all. All hugetlb entries are already young.
182 ++ * the dirty or accessed bit for any page in the set,
183 ++ * so check them all.
184 + */
185 + if (pte_dirty(pte))
186 + orig_pte = pte_mkdirty(orig_pte);
187 ++
188 ++ if (pte_young(pte))
189 ++ orig_pte = pte_mkyoung(orig_pte);
190 + }
191 +
192 + if (valid) {
193 +@@ -340,10 +343,13 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
194 + if (!pte_same(orig_pte, pte))
195 + changed = 1;
196 +
197 +- /* Make sure we don't lose the dirty state */
198 ++ /* Make sure we don't lose the dirty or young state */
199 + if (pte_dirty(orig_pte))
200 + pte = pte_mkdirty(pte);
201 +
202 ++ if (pte_young(orig_pte))
203 ++ pte = pte_mkyoung(pte);
204 ++
205 + hugeprot = pte_pgprot(pte);
206 + for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
207 + set_pte_at(vma->vm_mm, addr, ptep, pfn_pte(pfn, hugeprot));
208 +diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
209 +index 59d07bd5374a..055b211b7126 100644
210 +--- a/arch/powerpc/mm/numa.c
211 ++++ b/arch/powerpc/mm/numa.c
212 +@@ -1217,9 +1217,10 @@ int find_and_online_cpu_nid(int cpu)
213 + * Need to ensure that NODE_DATA is initialized for a node from
214 + * available memory (see memblock_alloc_try_nid). If unable to
215 + * init the node, then default to nearest node that has memory
216 +- * installed.
217 ++ * installed. Skip onlining a node if the subsystems are not
218 ++ * yet initialized.
219 + */
220 +- if (try_online_node(new_nid))
221 ++ if (!topology_inited || try_online_node(new_nid))
222 + new_nid = first_online_node;
223 + #else
224 + /*
225 +diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
226 +index 0efa5b29d0a3..dcff272aee06 100644
227 +--- a/arch/riscv/kernel/setup.c
228 ++++ b/arch/riscv/kernel/setup.c
229 +@@ -165,7 +165,7 @@ static void __init setup_bootmem(void)
230 + BUG_ON(mem_size == 0);
231 +
232 + set_max_mapnr(PFN_DOWN(mem_size));
233 +- max_low_pfn = pfn_base + PFN_DOWN(mem_size);
234 ++ max_low_pfn = memblock_end_of_DRAM();
235 +
236 + #ifdef CONFIG_BLK_DEV_INITRD
237 + setup_initrd();
238 +diff --git a/arch/sparc/include/asm/cpudata_64.h b/arch/sparc/include/asm/cpudata_64.h
239 +index 666d6b5c0440..9c3fc03abe9a 100644
240 +--- a/arch/sparc/include/asm/cpudata_64.h
241 ++++ b/arch/sparc/include/asm/cpudata_64.h
242 +@@ -28,7 +28,7 @@ typedef struct {
243 + unsigned short sock_id; /* physical package */
244 + unsigned short core_id;
245 + unsigned short max_cache_id; /* groupings of highest shared cache */
246 +- unsigned short proc_id; /* strand (aka HW thread) id */
247 ++ signed short proc_id; /* strand (aka HW thread) id */
248 + } cpuinfo_sparc;
249 +
250 + DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
251 +diff --git a/arch/sparc/include/asm/switch_to_64.h b/arch/sparc/include/asm/switch_to_64.h
252 +index 4ff29b1406a9..b1d4e2e3210f 100644
253 +--- a/arch/sparc/include/asm/switch_to_64.h
254 ++++ b/arch/sparc/include/asm/switch_to_64.h
255 +@@ -67,6 +67,7 @@ do { save_and_clear_fpu(); \
256 + } while(0)
257 +
258 + void synchronize_user_stack(void);
259 +-void fault_in_user_windows(void);
260 ++struct pt_regs;
261 ++void fault_in_user_windows(struct pt_regs *);
262 +
263 + #endif /* __SPARC64_SWITCH_TO_64_H */
264 +diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
265 +index d3149baaa33c..67b3e6b3ce5d 100644
266 +--- a/arch/sparc/kernel/perf_event.c
267 ++++ b/arch/sparc/kernel/perf_event.c
268 +@@ -24,6 +24,7 @@
269 + #include <asm/cpudata.h>
270 + #include <linux/uaccess.h>
271 + #include <linux/atomic.h>
272 ++#include <linux/sched/clock.h>
273 + #include <asm/nmi.h>
274 + #include <asm/pcr.h>
275 + #include <asm/cacheflush.h>
276 +@@ -927,6 +928,8 @@ static void read_in_all_counters(struct cpu_hw_events *cpuc)
277 + sparc_perf_event_update(cp, &cp->hw,
278 + cpuc->current_idx[i]);
279 + cpuc->current_idx[i] = PIC_NO_INDEX;
280 ++ if (cp->hw.state & PERF_HES_STOPPED)
281 ++ cp->hw.state |= PERF_HES_ARCH;
282 + }
283 + }
284 + }
285 +@@ -959,10 +962,12 @@ static void calculate_single_pcr(struct cpu_hw_events *cpuc)
286 +
287 + enc = perf_event_get_enc(cpuc->events[i]);
288 + cpuc->pcr[0] &= ~mask_for_index(idx);
289 +- if (hwc->state & PERF_HES_STOPPED)
290 ++ if (hwc->state & PERF_HES_ARCH) {
291 + cpuc->pcr[0] |= nop_for_index(idx);
292 +- else
293 ++ } else {
294 + cpuc->pcr[0] |= event_encoding(enc, idx);
295 ++ hwc->state = 0;
296 ++ }
297 + }
298 + out:
299 + cpuc->pcr[0] |= cpuc->event[0]->hw.config_base;
300 +@@ -988,6 +993,9 @@ static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
301 +
302 + cpuc->current_idx[i] = idx;
303 +
304 ++ if (cp->hw.state & PERF_HES_ARCH)
305 ++ continue;
306 ++
307 + sparc_pmu_start(cp, PERF_EF_RELOAD);
308 + }
309 + out:
310 +@@ -1079,6 +1087,8 @@ static void sparc_pmu_start(struct perf_event *event, int flags)
311 + event->hw.state = 0;
312 +
313 + sparc_pmu_enable_event(cpuc, &event->hw, idx);
314 ++
315 ++ perf_event_update_userpage(event);
316 + }
317 +
318 + static void sparc_pmu_stop(struct perf_event *event, int flags)
319 +@@ -1371,9 +1381,9 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags)
320 + cpuc->events[n0] = event->hw.event_base;
321 + cpuc->current_idx[n0] = PIC_NO_INDEX;
322 +
323 +- event->hw.state = PERF_HES_UPTODATE;
324 ++ event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
325 + if (!(ef_flags & PERF_EF_START))
326 +- event->hw.state |= PERF_HES_STOPPED;
327 ++ event->hw.state |= PERF_HES_ARCH;
328 +
329 + /*
330 + * If group events scheduling transaction was started,
331 +@@ -1603,6 +1613,8 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
332 + struct perf_sample_data data;
333 + struct cpu_hw_events *cpuc;
334 + struct pt_regs *regs;
335 ++ u64 finish_clock;
336 ++ u64 start_clock;
337 + int i;
338 +
339 + if (!atomic_read(&active_events))
340 +@@ -1616,6 +1628,8 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
341 + return NOTIFY_DONE;
342 + }
343 +
344 ++ start_clock = sched_clock();
345 ++
346 + regs = args->regs;
347 +
348 + cpuc = this_cpu_ptr(&cpu_hw_events);
349 +@@ -1654,6 +1668,10 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
350 + sparc_pmu_stop(event, 0);
351 + }
352 +
353 ++ finish_clock = sched_clock();
354 ++
355 ++ perf_sample_event_took(finish_clock - start_clock);
356 ++
357 + return NOTIFY_STOP;
358 + }
359 +
360 +diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
361 +index 6c086086ca8f..59eaf6227af1 100644
362 +--- a/arch/sparc/kernel/process_64.c
363 ++++ b/arch/sparc/kernel/process_64.c
364 +@@ -36,6 +36,7 @@
365 + #include <linux/sysrq.h>
366 + #include <linux/nmi.h>
367 + #include <linux/context_tracking.h>
368 ++#include <linux/signal.h>
369 +
370 + #include <linux/uaccess.h>
371 + #include <asm/page.h>
372 +@@ -521,7 +522,12 @@ static void stack_unaligned(unsigned long sp)
373 + force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *) sp, 0, current);
374 + }
375 +
376 +-void fault_in_user_windows(void)
377 ++static const char uwfault32[] = KERN_INFO \
378 ++ "%s[%d]: bad register window fault: SP %08lx (orig_sp %08lx) TPC %08lx O7 %08lx\n";
379 ++static const char uwfault64[] = KERN_INFO \
380 ++ "%s[%d]: bad register window fault: SP %016lx (orig_sp %016lx) TPC %08lx O7 %016lx\n";
381 ++
382 ++void fault_in_user_windows(struct pt_regs *regs)
383 + {
384 + struct thread_info *t = current_thread_info();
385 + unsigned long window;
386 +@@ -534,9 +540,9 @@ void fault_in_user_windows(void)
387 + do {
388 + struct reg_window *rwin = &t->reg_window[window];
389 + int winsize = sizeof(struct reg_window);
390 +- unsigned long sp;
391 ++ unsigned long sp, orig_sp;
392 +
393 +- sp = t->rwbuf_stkptrs[window];
394 ++ orig_sp = sp = t->rwbuf_stkptrs[window];
395 +
396 + if (test_thread_64bit_stack(sp))
397 + sp += STACK_BIAS;
398 +@@ -547,8 +553,16 @@ void fault_in_user_windows(void)
399 + stack_unaligned(sp);
400 +
401 + if (unlikely(copy_to_user((char __user *)sp,
402 +- rwin, winsize)))
403 ++ rwin, winsize))) {
404 ++ if (show_unhandled_signals)
405 ++ printk_ratelimited(is_compat_task() ?
406 ++ uwfault32 : uwfault64,
407 ++ current->comm, current->pid,
408 ++ sp, orig_sp,
409 ++ regs->tpc,
410 ++ regs->u_regs[UREG_I7]);
411 + goto barf;
412 ++ }
413 + } while (window--);
414 + }
415 + set_thread_wsaved(0);
416 +@@ -556,8 +570,7 @@ void fault_in_user_windows(void)
417 +
418 + barf:
419 + set_thread_wsaved(window + 1);
420 +- user_exit();
421 +- do_exit(SIGILL);
422 ++ force_sig(SIGSEGV, current);
423 + }
424 +
425 + asmlinkage long sparc_do_fork(unsigned long clone_flags,
426 +diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
427 +index f6528884a2c8..29aa34f11720 100644
428 +--- a/arch/sparc/kernel/rtrap_64.S
429 ++++ b/arch/sparc/kernel/rtrap_64.S
430 +@@ -39,6 +39,7 @@ __handle_preemption:
431 + wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
432 +
433 + __handle_user_windows:
434 ++ add %sp, PTREGS_OFF, %o0
435 + call fault_in_user_windows
436 + 661: wrpr %g0, RTRAP_PSTATE, %pstate
437 + /* If userspace is using ADI, it could potentially pass
438 +@@ -84,8 +85,9 @@ __handle_signal:
439 + ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
440 + sethi %hi(0xf << 20), %l4
441 + and %l1, %l4, %l4
442 ++ andn %l1, %l4, %l1
443 + ba,pt %xcc, __handle_preemption_continue
444 +- andn %l1, %l4, %l1
445 ++ srl %l4, 20, %l4
446 +
447 + /* When returning from a NMI (%pil==15) interrupt we want to
448 + * avoid running softirqs, doing IRQ tracing, preempting, etc.
449 +diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
450 +index 44d379db3f64..4c5b3fcbed94 100644
451 +--- a/arch/sparc/kernel/signal32.c
452 ++++ b/arch/sparc/kernel/signal32.c
453 +@@ -371,7 +371,11 @@ static int setup_frame32(struct ksignal *ksig, struct pt_regs *regs,
454 + get_sigframe(ksig, regs, sigframe_size);
455 +
456 + if (invalid_frame_pointer(sf, sigframe_size)) {
457 +- do_exit(SIGILL);
458 ++ if (show_unhandled_signals)
459 ++ pr_info("%s[%d] bad frame in setup_frame32: %08lx TPC %08lx O7 %08lx\n",
460 ++ current->comm, current->pid, (unsigned long)sf,
461 ++ regs->tpc, regs->u_regs[UREG_I7]);
462 ++ force_sigsegv(ksig->sig, current);
463 + return -EINVAL;
464 + }
465 +
466 +@@ -501,7 +505,11 @@ static int setup_rt_frame32(struct ksignal *ksig, struct pt_regs *regs,
467 + get_sigframe(ksig, regs, sigframe_size);
468 +
469 + if (invalid_frame_pointer(sf, sigframe_size)) {
470 +- do_exit(SIGILL);
471 ++ if (show_unhandled_signals)
472 ++ pr_info("%s[%d] bad frame in setup_rt_frame32: %08lx TPC %08lx O7 %08lx\n",
473 ++ current->comm, current->pid, (unsigned long)sf,
474 ++ regs->tpc, regs->u_regs[UREG_I7]);
475 ++ force_sigsegv(ksig->sig, current);
476 + return -EINVAL;
477 + }
478 +
479 +diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
480 +index 48366e5eb5b2..e9de1803a22e 100644
481 +--- a/arch/sparc/kernel/signal_64.c
482 ++++ b/arch/sparc/kernel/signal_64.c
483 +@@ -370,7 +370,11 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
484 + get_sigframe(ksig, regs, sf_size);
485 +
486 + if (invalid_frame_pointer (sf)) {
487 +- do_exit(SIGILL); /* won't return, actually */
488 ++ if (show_unhandled_signals)
489 ++ pr_info("%s[%d] bad frame in setup_rt_frame: %016lx TPC %016lx O7 %016lx\n",
490 ++ current->comm, current->pid, (unsigned long)sf,
491 ++ regs->tpc, regs->u_regs[UREG_I7]);
492 ++ force_sigsegv(ksig->sig, current);
493 + return -EINVAL;
494 + }
495 +
496 +diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
497 +index 387ef993880a..25699462ad5b 100644
498 +--- a/arch/sparc/kernel/systbls_64.S
499 ++++ b/arch/sparc/kernel/systbls_64.S
500 +@@ -47,9 +47,9 @@ sys_call_table32:
501 + .word sys_recvfrom, sys_setreuid16, sys_setregid16, sys_rename, compat_sys_truncate
502 + /*130*/ .word compat_sys_ftruncate, sys_flock, compat_sys_lstat64, sys_sendto, sys_shutdown
503 + .word sys_socketpair, sys_mkdir, sys_rmdir, compat_sys_utimes, compat_sys_stat64
504 +-/*140*/ .word sys_sendfile64, sys_nis_syscall, compat_sys_futex, sys_gettid, compat_sys_getrlimit
505 ++/*140*/ .word sys_sendfile64, sys_getpeername, compat_sys_futex, sys_gettid, compat_sys_getrlimit
506 + .word compat_sys_setrlimit, sys_pivot_root, sys_prctl, sys_pciconfig_read, sys_pciconfig_write
507 +-/*150*/ .word sys_nis_syscall, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64
508 ++/*150*/ .word sys_getsockname, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64
509 + .word compat_sys_fcntl64, sys_inotify_rm_watch, compat_sys_statfs, compat_sys_fstatfs, sys_oldumount
510 + /*160*/ .word compat_sys_sched_setaffinity, compat_sys_sched_getaffinity, sys_getdomainname, sys_setdomainname, sys_nis_syscall
511 + .word sys_quotactl, sys_set_tid_address, compat_sys_mount, compat_sys_ustat, sys_setxattr
512 +diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
513 +index f396048a0d68..39822f611c01 100644
514 +--- a/arch/sparc/mm/init_64.c
515 ++++ b/arch/sparc/mm/init_64.c
516 +@@ -1383,6 +1383,7 @@ int __node_distance(int from, int to)
517 + }
518 + return numa_latency[from][to];
519 + }
520 ++EXPORT_SYMBOL(__node_distance);
521 +
522 + static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
523 + {
524 +diff --git a/arch/sparc/vdso/vclock_gettime.c b/arch/sparc/vdso/vclock_gettime.c
525 +index 3feb3d960ca5..75dca9aab737 100644
526 +--- a/arch/sparc/vdso/vclock_gettime.c
527 ++++ b/arch/sparc/vdso/vclock_gettime.c
528 +@@ -33,9 +33,19 @@
529 + #define TICK_PRIV_BIT (1ULL << 63)
530 + #endif
531 +
532 ++#ifdef CONFIG_SPARC64
533 + #define SYSCALL_STRING \
534 + "ta 0x6d;" \
535 +- "sub %%g0, %%o0, %%o0;" \
536 ++ "bcs,a 1f;" \
537 ++ " sub %%g0, %%o0, %%o0;" \
538 ++ "1:"
539 ++#else
540 ++#define SYSCALL_STRING \
541 ++ "ta 0x10;" \
542 ++ "bcs,a 1f;" \
543 ++ " sub %%g0, %%o0, %%o0;" \
544 ++ "1:"
545 ++#endif
546 +
547 + #define SYSCALL_CLOBBERS \
548 + "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", \
549 +diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
550 +index 981ba5e8241b..8671de126eac 100644
551 +--- a/arch/x86/events/amd/uncore.c
552 ++++ b/arch/x86/events/amd/uncore.c
553 +@@ -36,6 +36,7 @@
554 +
555 + static int num_counters_llc;
556 + static int num_counters_nb;
557 ++static bool l3_mask;
558 +
559 + static HLIST_HEAD(uncore_unused_list);
560 +
561 +@@ -209,6 +210,13 @@ static int amd_uncore_event_init(struct perf_event *event)
562 + hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
563 + hwc->idx = -1;
564 +
565 ++ /*
566 ++ * SliceMask and ThreadMask need to be set for certain L3 events in
567 ++ * Family 17h. For other events, the two fields do not affect the count.
568 ++ */
569 ++ if (l3_mask)
570 ++ hwc->config |= (AMD64_L3_SLICE_MASK | AMD64_L3_THREAD_MASK);
571 ++
572 + if (event->cpu < 0)
573 + return -EINVAL;
574 +
575 +@@ -525,6 +533,7 @@ static int __init amd_uncore_init(void)
576 + amd_llc_pmu.name = "amd_l3";
577 + format_attr_event_df.show = &event_show_df;
578 + format_attr_event_l3.show = &event_show_l3;
579 ++ l3_mask = true;
580 + } else {
581 + num_counters_nb = NUM_COUNTERS_NB;
582 + num_counters_llc = NUM_COUNTERS_L2;
583 +@@ -532,6 +541,7 @@ static int __init amd_uncore_init(void)
584 + amd_llc_pmu.name = "amd_l2";
585 + format_attr_event_df = format_attr_event;
586 + format_attr_event_l3 = format_attr_event;
587 ++ l3_mask = false;
588 + }
589 +
590 + amd_nb_pmu.attr_groups = amd_uncore_attr_groups_df;
591 +diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
592 +index 51d7c117e3c7..c07bee31abe8 100644
593 +--- a/arch/x86/events/intel/uncore_snbep.c
594 ++++ b/arch/x86/events/intel/uncore_snbep.c
595 +@@ -3061,7 +3061,7 @@ static struct event_constraint bdx_uncore_pcu_constraints[] = {
596 +
597 + void bdx_uncore_cpu_init(void)
598 + {
599 +- int pkg = topology_phys_to_logical_pkg(0);
600 ++ int pkg = topology_phys_to_logical_pkg(boot_cpu_data.phys_proc_id);
601 +
602 + if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
603 + bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
604 +@@ -3931,16 +3931,16 @@ static const struct pci_device_id skx_uncore_pci_ids[] = {
605 + .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
606 + },
607 + { /* M3UPI0 Link 0 */
608 +- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204C),
609 +- .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, SKX_PCI_UNCORE_M3UPI, 0),
610 ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
611 ++ .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
612 + },
613 + { /* M3UPI0 Link 1 */
614 +- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
615 +- .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 1),
616 ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
617 ++ .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
618 + },
619 + { /* M3UPI1 Link 2 */
620 +- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204C),
621 +- .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 4, SKX_PCI_UNCORE_M3UPI, 2),
622 ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
623 ++ .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
624 + },
625 + { /* end: all zeroes */ }
626 + };
627 +diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
628 +index 12f54082f4c8..78241b736f2a 100644
629 +--- a/arch/x86/include/asm/perf_event.h
630 ++++ b/arch/x86/include/asm/perf_event.h
631 +@@ -46,6 +46,14 @@
632 + #define INTEL_ARCH_EVENT_MASK \
633 + (ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)
634 +
635 ++#define AMD64_L3_SLICE_SHIFT 48
636 ++#define AMD64_L3_SLICE_MASK \
637 ++ ((0xFULL) << AMD64_L3_SLICE_SHIFT)
638 ++
639 ++#define AMD64_L3_THREAD_SHIFT 56
640 ++#define AMD64_L3_THREAD_MASK \
641 ++ ((0xFFULL) << AMD64_L3_THREAD_SHIFT)
642 ++
643 + #define X86_RAW_EVENT_MASK \
644 + (ARCH_PERFMON_EVENTSEL_EVENT | \
645 + ARCH_PERFMON_EVENTSEL_UMASK | \
646 +diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
647 +index 930c88341e4e..1fbf38dde84c 100644
648 +--- a/arch/x86/kernel/paravirt.c
649 ++++ b/arch/x86/kernel/paravirt.c
650 +@@ -90,7 +90,7 @@ unsigned paravirt_patch_call(void *insnbuf,
651 +
652 + if (len < 5) {
653 + #ifdef CONFIG_RETPOLINE
654 +- WARN_ONCE("Failing to patch indirect CALL in %ps\n", (void *)addr);
655 ++ WARN_ONCE(1, "Failing to patch indirect CALL in %ps\n", (void *)addr);
656 + #endif
657 + return len; /* call too long for patch site */
658 + }
659 +@@ -110,7 +110,7 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
660 +
661 + if (len < 5) {
662 + #ifdef CONFIG_RETPOLINE
663 +- WARN_ONCE("Failing to patch indirect JMP in %ps\n", (void *)addr);
664 ++ WARN_ONCE(1, "Failing to patch indirect JMP in %ps\n", (void *)addr);
665 + #endif
666 + return len; /* call too long for patch site */
667 + }
668 +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
669 +index ef772e5634d4..3e59a187fe30 100644
670 +--- a/arch/x86/kvm/svm.c
671 ++++ b/arch/x86/kvm/svm.c
672 +@@ -436,14 +436,18 @@ static inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
673 +
674 + static inline bool svm_sev_enabled(void)
675 + {
676 +- return max_sev_asid;
677 ++ return IS_ENABLED(CONFIG_KVM_AMD_SEV) ? max_sev_asid : 0;
678 + }
679 +
680 + static inline bool sev_guest(struct kvm *kvm)
681 + {
682 ++#ifdef CONFIG_KVM_AMD_SEV
683 + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
684 +
685 + return sev->active;
686 ++#else
687 ++ return false;
688 ++#endif
689 + }
690 +
691 + static inline int sev_get_asid(struct kvm *kvm)
692 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
693 +index 32721ef9652d..9efe130ea2e6 100644
694 +--- a/arch/x86/kvm/vmx.c
695 ++++ b/arch/x86/kvm/vmx.c
696 +@@ -819,6 +819,7 @@ struct nested_vmx {
697 +
698 + /* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
699 + u64 vmcs01_debugctl;
700 ++ u64 vmcs01_guest_bndcfgs;
701 +
702 + u16 vpid02;
703 + u16 last_vpid;
704 +@@ -3395,9 +3396,6 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
705 + VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
706 + VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
707 +
708 +- if (kvm_mpx_supported())
709 +- msrs->exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
710 +-
711 + /* We support free control of debug control saving. */
712 + msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
713 +
714 +@@ -3414,8 +3412,6 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
715 + VM_ENTRY_LOAD_IA32_PAT;
716 + msrs->entry_ctls_high |=
717 + (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
718 +- if (kvm_mpx_supported())
719 +- msrs->entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
720 +
721 + /* We support free control of debug control loading. */
722 + msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
723 +@@ -10825,6 +10821,23 @@ static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu)
724 + #undef cr4_fixed1_update
725 + }
726 +
727 ++static void nested_vmx_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
728 ++{
729 ++ struct vcpu_vmx *vmx = to_vmx(vcpu);
730 ++
731 ++ if (kvm_mpx_supported()) {
732 ++ bool mpx_enabled = guest_cpuid_has(vcpu, X86_FEATURE_MPX);
733 ++
734 ++ if (mpx_enabled) {
735 ++ vmx->nested.msrs.entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
736 ++ vmx->nested.msrs.exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
737 ++ } else {
738 ++ vmx->nested.msrs.entry_ctls_high &= ~VM_ENTRY_LOAD_BNDCFGS;
739 ++ vmx->nested.msrs.exit_ctls_high &= ~VM_EXIT_CLEAR_BNDCFGS;
740 ++ }
741 ++ }
742 ++}
743 ++
744 + static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
745 + {
746 + struct vcpu_vmx *vmx = to_vmx(vcpu);
747 +@@ -10841,8 +10854,10 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
748 + to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
749 + ~FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
750 +
751 +- if (nested_vmx_allowed(vcpu))
752 ++ if (nested_vmx_allowed(vcpu)) {
753 + nested_vmx_cr_fixed1_bits_update(vcpu);
754 ++ nested_vmx_entry_exit_ctls_update(vcpu);
755 ++ }
756 + }
757 +
758 + static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
759 +@@ -11553,8 +11568,13 @@ static void prepare_vmcs02_full(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
760 +
761 + set_cr4_guest_host_mask(vmx);
762 +
763 +- if (vmx_mpx_supported())
764 +- vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
765 ++ if (kvm_mpx_supported()) {
766 ++ if (vmx->nested.nested_run_pending &&
767 ++ (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
768 ++ vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
769 ++ else
770 ++ vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
771 ++ }
772 +
773 + if (enable_vpid) {
774 + if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
775 +@@ -12068,6 +12088,9 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu)
776 +
777 + if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
778 + vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
779 ++ if (kvm_mpx_supported() &&
780 ++ !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
781 ++ vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
782 +
783 + vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
784 + vmx_segment_cache_clear(vmx);
785 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
786 +index 97fcac34e007..3cd58a5eb449 100644
787 +--- a/arch/x86/kvm/x86.c
788 ++++ b/arch/x86/kvm/x86.c
789 +@@ -4625,7 +4625,7 @@ static void kvm_init_msr_list(void)
790 + */
791 + switch (msrs_to_save[i]) {
792 + case MSR_IA32_BNDCFGS:
793 +- if (!kvm_x86_ops->mpx_supported())
794 ++ if (!kvm_mpx_supported())
795 + continue;
796 + break;
797 + case MSR_TSC_AUX:
798 +diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c
799 +index 6f7637b19738..e764dfdea53f 100644
800 +--- a/drivers/clk/mvebu/armada-37xx-periph.c
801 ++++ b/drivers/clk/mvebu/armada-37xx-periph.c
802 +@@ -419,7 +419,6 @@ static unsigned int armada_3700_pm_dvfs_get_cpu_parent(struct regmap *base)
803 + static u8 clk_pm_cpu_get_parent(struct clk_hw *hw)
804 + {
805 + struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
806 +- int num_parents = clk_hw_get_num_parents(hw);
807 + u32 val;
808 +
809 + if (armada_3700_pm_dvfs_is_enabled(pm_cpu->nb_pm_base)) {
810 +diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
811 +index 06dce16e22bb..70f0dedca59f 100644
812 +--- a/drivers/gpio/gpiolib.c
813 ++++ b/drivers/gpio/gpiolib.c
814 +@@ -1675,7 +1675,8 @@ static void gpiochip_set_cascaded_irqchip(struct gpio_chip *gpiochip,
815 + irq_set_chained_handler_and_data(parent_irq, parent_handler,
816 + gpiochip);
817 +
818 +- gpiochip->irq.parents = &parent_irq;
819 ++ gpiochip->irq.parent_irq = parent_irq;
820 ++ gpiochip->irq.parents = &gpiochip->irq.parent_irq;
821 + gpiochip->irq.num_parents = 1;
822 + }
823 +
824 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
825 +index e484d0a94bdc..5b9cc3aeaa55 100644
826 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
827 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
828 +@@ -4494,12 +4494,18 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
829 + }
830 + spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
831 +
832 +- /* Signal HW programming completion */
833 +- drm_atomic_helper_commit_hw_done(state);
834 +
835 + if (wait_for_vblank)
836 + drm_atomic_helper_wait_for_flip_done(dev, state);
837 +
838 ++ /*
839 ++ * FIXME:
840 ++ * Delay hw_done() until flip_done() is signaled. This is to block
841 ++ * another commit from freeing the CRTC state while we're still
842 ++ * waiting on flip_done.
843 ++ */
844 ++ drm_atomic_helper_commit_hw_done(state);
845 ++
846 + drm_atomic_helper_cleanup_planes(dev, state);
847 +
848 + /* Finally, drop a runtime PM reference for each newly disabled CRTC,
849 +diff --git a/drivers/gpu/drm/i2c/tda9950.c b/drivers/gpu/drm/i2c/tda9950.c
850 +index 3f7396caad48..ccd355d0c123 100644
851 +--- a/drivers/gpu/drm/i2c/tda9950.c
852 ++++ b/drivers/gpu/drm/i2c/tda9950.c
853 +@@ -188,7 +188,8 @@ static irqreturn_t tda9950_irq(int irq, void *data)
854 + break;
855 + }
856 + /* TDA9950 executes all retries for us */
857 +- tx_status |= CEC_TX_STATUS_MAX_RETRIES;
858 ++ if (tx_status != CEC_TX_STATUS_OK)
859 ++ tx_status |= CEC_TX_STATUS_MAX_RETRIES;
860 + cec_transmit_done(priv->adap, tx_status, arb_lost_cnt,
861 + nack_cnt, 0, err_cnt);
862 + break;
863 +@@ -307,7 +308,7 @@ static void tda9950_release(struct tda9950_priv *priv)
864 + /* Wait up to .5s for it to signal non-busy */
865 + do {
866 + csr = tda9950_read(client, REG_CSR);
867 +- if (!(csr & CSR_BUSY) || --timeout)
868 ++ if (!(csr & CSR_BUSY) || !--timeout)
869 + break;
870 + msleep(10);
871 + } while (1);
872 +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
873 +index eee6b79fb131..ae5b72269e27 100644
874 +--- a/drivers/hid/hid-ids.h
875 ++++ b/drivers/hid/hid-ids.h
876 +@@ -974,7 +974,6 @@
877 + #define USB_DEVICE_ID_SIS817_TOUCH 0x0817
878 + #define USB_DEVICE_ID_SIS_TS 0x1013
879 + #define USB_DEVICE_ID_SIS1030_TOUCH 0x1030
880 +-#define USB_DEVICE_ID_SIS10FB_TOUCH 0x10fb
881 +
882 + #define USB_VENDOR_ID_SKYCABLE 0x1223
883 + #define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07
884 +diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
885 +index 37013b58098c..d17cf6e323b2 100644
886 +--- a/drivers/hid/i2c-hid/i2c-hid.c
887 ++++ b/drivers/hid/i2c-hid/i2c-hid.c
888 +@@ -47,8 +47,7 @@
889 + /* quirks to control the device */
890 + #define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0)
891 + #define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1)
892 +-#define I2C_HID_QUIRK_RESEND_REPORT_DESCR BIT(2)
893 +-#define I2C_HID_QUIRK_NO_RUNTIME_PM BIT(3)
894 ++#define I2C_HID_QUIRK_NO_RUNTIME_PM BIT(2)
895 +
896 + /* flags */
897 + #define I2C_HID_STARTED 0
898 +@@ -172,8 +171,6 @@ static const struct i2c_hid_quirks {
899 + { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
900 + I2C_HID_QUIRK_NO_IRQ_AFTER_RESET |
901 + I2C_HID_QUIRK_NO_RUNTIME_PM },
902 +- { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS10FB_TOUCH,
903 +- I2C_HID_QUIRK_RESEND_REPORT_DESCR },
904 + { 0, 0 }
905 + };
906 +
907 +@@ -1241,22 +1238,13 @@ static int i2c_hid_resume(struct device *dev)
908 +
909 + /* Instead of resetting device, simply powers the device on. This
910 + * solves "incomplete reports" on Raydium devices 2386:3118 and
911 +- * 2386:4B33
912 ++ * 2386:4B33 and fixes various SIS touchscreens no longer sending
913 ++ * data after a suspend/resume.
914 + */
915 + ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
916 + if (ret)
917 + return ret;
918 +
919 +- /* Some devices need to re-send report descr cmd
920 +- * after resume, after this it will be back normal.
921 +- * otherwise it issues too many incomplete reports.
922 +- */
923 +- if (ihid->quirks & I2C_HID_QUIRK_RESEND_REPORT_DESCR) {
924 +- ret = i2c_hid_command(client, &hid_report_descr_cmd, NULL, 0);
925 +- if (ret)
926 +- return ret;
927 +- }
928 +-
929 + if (hid->driver && hid->driver->reset_resume) {
930 + ret = hid->driver->reset_resume(hid);
931 + return ret;
932 +diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
933 +index 308456d28afb..73339fd47dd8 100644
934 +--- a/drivers/infiniband/hw/mlx5/mr.c
935 ++++ b/drivers/infiniband/hw/mlx5/mr.c
936 +@@ -544,6 +544,9 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
937 + int shrink = 0;
938 + int c;
939 +
940 ++ if (!mr->allocated_from_cache)
941 ++ return;
942 ++
943 + c = order2idx(dev, mr->order);
944 + if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
945 + mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
946 +@@ -1647,18 +1650,19 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
947 + umem = NULL;
948 + }
949 + #endif
950 +-
951 + clean_mr(dev, mr);
952 +
953 ++ /*
954 ++ * We should unregister the DMA address from the HCA before
955 ++ * remove the DMA mapping.
956 ++ */
957 ++ mlx5_mr_cache_free(dev, mr);
958 + if (umem) {
959 + ib_umem_release(umem);
960 + atomic_sub(npages, &dev->mdev->priv.reg_pages);
961 + }
962 +-
963 + if (!mr->allocated_from_cache)
964 + kfree(mr);
965 +- else
966 +- mlx5_mr_cache_free(dev, mr);
967 + }
968 +
969 + int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
970 +diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
971 +index 9697977b80f0..6b9ad8673218 100644
972 +--- a/drivers/net/bonding/bond_netlink.c
973 ++++ b/drivers/net/bonding/bond_netlink.c
974 +@@ -638,8 +638,7 @@ static int bond_fill_info(struct sk_buff *skb,
975 + goto nla_put_failure;
976 +
977 + if (nla_put(skb, IFLA_BOND_AD_ACTOR_SYSTEM,
978 +- sizeof(bond->params.ad_actor_system),
979 +- &bond->params.ad_actor_system))
980 ++ ETH_ALEN, &bond->params.ad_actor_system))
981 + goto nla_put_failure;
982 + }
983 + if (!bond_3ad_get_active_agg_info(bond, &info)) {
984 +diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
985 +index 1b01cd2820ba..000f0d42a710 100644
986 +--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
987 ++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
988 +@@ -1580,8 +1580,6 @@ static int ena_up_complete(struct ena_adapter *adapter)
989 + if (rc)
990 + return rc;
991 +
992 +- ena_init_napi(adapter);
993 +-
994 + ena_change_mtu(adapter->netdev, adapter->netdev->mtu);
995 +
996 + ena_refill_all_rx_bufs(adapter);
997 +@@ -1735,6 +1733,13 @@ static int ena_up(struct ena_adapter *adapter)
998 +
999 + ena_setup_io_intr(adapter);
1000 +
1001 ++ /* napi poll functions should be initialized before running
1002 ++ * request_irq(), to handle a rare condition where there is a pending
1003 ++ * interrupt, causing the ISR to fire immediately while the poll
1004 ++ * function wasn't set yet, causing a null dereference
1005 ++ */
1006 ++ ena_init_napi(adapter);
1007 ++
1008 + rc = ena_request_io_irq(adapter);
1009 + if (rc)
1010 + goto err_req_irq;
1011 +@@ -2648,7 +2653,11 @@ err_disable_msix:
1012 + ena_free_mgmnt_irq(adapter);
1013 + ena_disable_msix(adapter);
1014 + err_device_destroy:
1015 ++ ena_com_abort_admin_commands(ena_dev);
1016 ++ ena_com_wait_for_abort_completion(ena_dev);
1017 + ena_com_admin_destroy(ena_dev);
1018 ++ ena_com_mmio_reg_read_request_destroy(ena_dev);
1019 ++ ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
1020 + err:
1021 + clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
1022 + clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
1023 +@@ -3128,15 +3137,8 @@ err_rss_init:
1024 +
1025 + static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
1026 + {
1027 +- int release_bars;
1028 +-
1029 +- if (ena_dev->mem_bar)
1030 +- devm_iounmap(&pdev->dev, ena_dev->mem_bar);
1031 +-
1032 +- if (ena_dev->reg_bar)
1033 +- devm_iounmap(&pdev->dev, ena_dev->reg_bar);
1034 ++ int release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
1035 +
1036 +- release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
1037 + pci_release_selected_regions(pdev, release_bars);
1038 + }
1039 +
1040 +diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
1041 +index 116997a8b593..00332a1ea84b 100644
1042 +--- a/drivers/net/ethernet/amd/declance.c
1043 ++++ b/drivers/net/ethernet/amd/declance.c
1044 +@@ -1031,6 +1031,7 @@ static int dec_lance_probe(struct device *bdev, const int type)
1045 + int i, ret;
1046 + unsigned long esar_base;
1047 + unsigned char *esar;
1048 ++ const char *desc;
1049 +
1050 + if (dec_lance_debug && version_printed++ == 0)
1051 + printk(version);
1052 +@@ -1216,19 +1217,20 @@ static int dec_lance_probe(struct device *bdev, const int type)
1053 + */
1054 + switch (type) {
1055 + case ASIC_LANCE:
1056 +- printk("%s: IOASIC onboard LANCE", name);
1057 ++ desc = "IOASIC onboard LANCE";
1058 + break;
1059 + case PMAD_LANCE:
1060 +- printk("%s: PMAD-AA", name);
1061 ++ desc = "PMAD-AA";
1062 + break;
1063 + case PMAX_LANCE:
1064 +- printk("%s: PMAX onboard LANCE", name);
1065 ++ desc = "PMAX onboard LANCE";
1066 + break;
1067 + }
1068 + for (i = 0; i < 6; i++)
1069 + dev->dev_addr[i] = esar[i * 4];
1070 +
1071 +- printk(", addr = %pM, irq = %d\n", dev->dev_addr, dev->irq);
1072 ++ printk("%s: %s, addr = %pM, irq = %d\n",
1073 ++ name, desc, dev->dev_addr, dev->irq);
1074 +
1075 + dev->netdev_ops = &lance_netdev_ops;
1076 + dev->watchdog_timeo = 5*HZ;
1077 +diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
1078 +index 4241ae928d4a..34af5f1569c8 100644
1079 +--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
1080 ++++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
1081 +@@ -321,9 +321,12 @@ int bcmgenet_mii_probe(struct net_device *dev)
1082 + phydev->advertising = phydev->supported;
1083 +
1084 + /* The internal PHY has its link interrupts routed to the
1085 +- * Ethernet MAC ISRs
1086 ++ * Ethernet MAC ISRs. On GENETv5 there is a hardware issue
1087 ++ * that prevents the signaling of link UP interrupts when
1088 ++ * the link operates at 10Mbps, so fallback to polling for
1089 ++ * those versions of GENET.
1090 + */
1091 +- if (priv->internal_phy)
1092 ++ if (priv->internal_phy && !GENET_IS_V5(priv))
1093 + dev->phydev->irq = PHY_IGNORE_INTERRUPT;
1094 +
1095 + return 0;
1096 +diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
1097 +index dfa045f22ef1..db568232ff3e 100644
1098 +--- a/drivers/net/ethernet/cadence/macb_main.c
1099 ++++ b/drivers/net/ethernet/cadence/macb_main.c
1100 +@@ -2089,6 +2089,7 @@ static void macb_configure_dma(struct macb *bp)
1101 + else
1102 + dmacfg &= ~GEM_BIT(TXCOEN);
1103 +
1104 ++ dmacfg &= ~GEM_BIT(ADDR64);
1105 + #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1106 + if (bp->hw_dma_cap & HW_DMA_CAP_64B)
1107 + dmacfg |= GEM_BIT(ADDR64);
1108 +diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1109 +index a19172dbe6be..c34ea385fe4a 100644
1110 +--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1111 ++++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1112 +@@ -2159,6 +2159,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1113 + return -EPERM;
1114 + if (copy_from_user(&t, useraddr, sizeof(t)))
1115 + return -EFAULT;
1116 ++ if (t.cmd != CHELSIO_SET_QSET_PARAMS)
1117 ++ return -EINVAL;
1118 + if (t.qset_idx >= SGE_QSETS)
1119 + return -EINVAL;
1120 + if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1121 +@@ -2258,6 +2260,9 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1122 + if (copy_from_user(&t, useraddr, sizeof(t)))
1123 + return -EFAULT;
1124 +
1125 ++ if (t.cmd != CHELSIO_GET_QSET_PARAMS)
1126 ++ return -EINVAL;
1127 ++
1128 + /* Display qsets for all ports when offload enabled */
1129 + if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
1130 + q1 = 0;
1131 +@@ -2303,6 +2308,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1132 + return -EBUSY;
1133 + if (copy_from_user(&edata, useraddr, sizeof(edata)))
1134 + return -EFAULT;
1135 ++ if (edata.cmd != CHELSIO_SET_QSET_NUM)
1136 ++ return -EINVAL;
1137 + if (edata.val < 1 ||
1138 + (edata.val > 1 && !(adapter->flags & USING_MSIX)))
1139 + return -EINVAL;
1140 +@@ -2343,6 +2350,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1141 + return -EPERM;
1142 + if (copy_from_user(&t, useraddr, sizeof(t)))
1143 + return -EFAULT;
1144 ++ if (t.cmd != CHELSIO_LOAD_FW)
1145 ++ return -EINVAL;
1146 + /* Check t.len sanity ? */
1147 + fw_data = memdup_user(useraddr + sizeof(t), t.len);
1148 + if (IS_ERR(fw_data))
1149 +@@ -2366,6 +2375,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1150 + return -EBUSY;
1151 + if (copy_from_user(&m, useraddr, sizeof(m)))
1152 + return -EFAULT;
1153 ++ if (m.cmd != CHELSIO_SETMTUTAB)
1154 ++ return -EINVAL;
1155 + if (m.nmtus != NMTUS)
1156 + return -EINVAL;
1157 + if (m.mtus[0] < 81) /* accommodate SACK */
1158 +@@ -2407,6 +2418,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1159 + return -EBUSY;
1160 + if (copy_from_user(&m, useraddr, sizeof(m)))
1161 + return -EFAULT;
1162 ++ if (m.cmd != CHELSIO_SET_PM)
1163 ++ return -EINVAL;
1164 + if (!is_power_of_2(m.rx_pg_sz) ||
1165 + !is_power_of_2(m.tx_pg_sz))
1166 + return -EINVAL; /* not power of 2 */
1167 +@@ -2440,6 +2453,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1168 + return -EIO; /* need the memory controllers */
1169 + if (copy_from_user(&t, useraddr, sizeof(t)))
1170 + return -EFAULT;
1171 ++ if (t.cmd != CHELSIO_GET_MEM)
1172 ++ return -EINVAL;
1173 + if ((t.addr & 7) || (t.len & 7))
1174 + return -EINVAL;
1175 + if (t.mem_id == MEM_CM)
1176 +@@ -2492,6 +2507,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1177 + return -EAGAIN;
1178 + if (copy_from_user(&t, useraddr, sizeof(t)))
1179 + return -EFAULT;
1180 ++ if (t.cmd != CHELSIO_SET_TRACE_FILTER)
1181 ++ return -EINVAL;
1182 +
1183 + tp = (const struct trace_params *)&t.sip;
1184 + if (t.config_tx)
1185 +diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
1186 +index 8f755009ff38..c8445a4135a9 100644
1187 +--- a/drivers/net/ethernet/emulex/benet/be_main.c
1188 ++++ b/drivers/net/ethernet/emulex/benet/be_main.c
1189 +@@ -3915,8 +3915,6 @@ static int be_enable_vxlan_offloads(struct be_adapter *adapter)
1190 + netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1191 + NETIF_F_TSO | NETIF_F_TSO6 |
1192 + NETIF_F_GSO_UDP_TUNNEL;
1193 +- netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
1194 +- netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
1195 +
1196 + dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
1197 + be16_to_cpu(port));
1198 +@@ -3938,8 +3936,6 @@ static void be_disable_vxlan_offloads(struct be_adapter *adapter)
1199 + adapter->vxlan_port = 0;
1200 +
1201 + netdev->hw_enc_features = 0;
1202 +- netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
1203 +- netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
1204 + }
1205 +
1206 + static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
1207 +@@ -5232,6 +5228,7 @@ static void be_netdev_init(struct net_device *netdev)
1208 + struct be_adapter *adapter = netdev_priv(netdev);
1209 +
1210 + netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
1211 ++ NETIF_F_GSO_UDP_TUNNEL |
1212 + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
1213 + NETIF_F_HW_VLAN_CTAG_TX;
1214 + if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
1215 +diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
1216 +index 4778b663653e..bf80855dd0dd 100644
1217 +--- a/drivers/net/ethernet/freescale/fec.h
1218 ++++ b/drivers/net/ethernet/freescale/fec.h
1219 +@@ -452,6 +452,10 @@ struct bufdesc_ex {
1220 + * initialisation.
1221 + */
1222 + #define FEC_QUIRK_MIB_CLEAR (1 << 15)
1223 ++/* Only i.MX25/i.MX27/i.MX28 controller supports FRBR,FRSR registers,
1224 ++ * those FIFO receive registers are resolved in other platforms.
1225 ++ */
1226 ++#define FEC_QUIRK_HAS_FRREG (1 << 16)
1227 +
1228 + struct bufdesc_prop {
1229 + int qid;
1230 +diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
1231 +index c729665107f5..11f90bb2d2a9 100644
1232 +--- a/drivers/net/ethernet/freescale/fec_main.c
1233 ++++ b/drivers/net/ethernet/freescale/fec_main.c
1234 +@@ -90,14 +90,16 @@ static struct platform_device_id fec_devtype[] = {
1235 + .driver_data = 0,
1236 + }, {
1237 + .name = "imx25-fec",
1238 +- .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR,
1239 ++ .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
1240 ++ FEC_QUIRK_HAS_FRREG,
1241 + }, {
1242 + .name = "imx27-fec",
1243 +- .driver_data = FEC_QUIRK_MIB_CLEAR,
1244 ++ .driver_data = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG,
1245 + }, {
1246 + .name = "imx28-fec",
1247 + .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
1248 +- FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC,
1249 ++ FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
1250 ++ FEC_QUIRK_HAS_FRREG,
1251 + }, {
1252 + .name = "imx6q-fec",
1253 + .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
1254 +@@ -1157,7 +1159,7 @@ static void fec_enet_timeout_work(struct work_struct *work)
1255 + napi_disable(&fep->napi);
1256 + netif_tx_lock_bh(ndev);
1257 + fec_restart(ndev);
1258 +- netif_wake_queue(ndev);
1259 ++ netif_tx_wake_all_queues(ndev);
1260 + netif_tx_unlock_bh(ndev);
1261 + napi_enable(&fep->napi);
1262 + }
1263 +@@ -1272,7 +1274,7 @@ skb_done:
1264 +
1265 + /* Since we have freed up a buffer, the ring is no longer full
1266 + */
1267 +- if (netif_queue_stopped(ndev)) {
1268 ++ if (netif_tx_queue_stopped(nq)) {
1269 + entries_free = fec_enet_get_free_txdesc_num(txq);
1270 + if (entries_free >= txq->tx_wake_threshold)
1271 + netif_tx_wake_queue(nq);
1272 +@@ -1745,7 +1747,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
1273 + napi_disable(&fep->napi);
1274 + netif_tx_lock_bh(ndev);
1275 + fec_restart(ndev);
1276 +- netif_wake_queue(ndev);
1277 ++ netif_tx_wake_all_queues(ndev);
1278 + netif_tx_unlock_bh(ndev);
1279 + napi_enable(&fep->napi);
1280 + }
1281 +@@ -2163,7 +2165,13 @@ static void fec_enet_get_regs(struct net_device *ndev,
1282 + memset(buf, 0, regs->len);
1283 +
1284 + for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) {
1285 +- off = fec_enet_register_offset[i] / 4;
1286 ++ off = fec_enet_register_offset[i];
1287 ++
1288 ++ if ((off == FEC_R_BOUND || off == FEC_R_FSTART) &&
1289 ++ !(fep->quirks & FEC_QUIRK_HAS_FRREG))
1290 ++ continue;
1291 ++
1292 ++ off >>= 2;
1293 + buf[off] = readl(&theregs[off]);
1294 + }
1295 + }
1296 +@@ -2246,7 +2254,7 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
1297 + napi_disable(&fep->napi);
1298 + netif_tx_lock_bh(ndev);
1299 + fec_restart(ndev);
1300 +- netif_wake_queue(ndev);
1301 ++ netif_tx_wake_all_queues(ndev);
1302 + netif_tx_unlock_bh(ndev);
1303 + napi_enable(&fep->napi);
1304 + }
1305 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1306 +index d3a1dd20e41d..fb6c72cf70a0 100644
1307 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1308 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1309 +@@ -429,10 +429,9 @@ static inline u16 mlx5e_icosq_wrap_cnt(struct mlx5e_icosq *sq)
1310 +
1311 + static inline void mlx5e_fill_icosq_frag_edge(struct mlx5e_icosq *sq,
1312 + struct mlx5_wq_cyc *wq,
1313 +- u16 pi, u16 frag_pi)
1314 ++ u16 pi, u16 nnops)
1315 + {
1316 + struct mlx5e_sq_wqe_info *edge_wi, *wi = &sq->db.ico_wqe[pi];
1317 +- u8 nnops = mlx5_wq_cyc_get_frag_size(wq) - frag_pi;
1318 +
1319 + edge_wi = wi + nnops;
1320 +
1321 +@@ -451,15 +450,14 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
1322 + struct mlx5_wq_cyc *wq = &sq->wq;
1323 + struct mlx5e_umr_wqe *umr_wqe;
1324 + u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1);
1325 +- u16 pi, frag_pi;
1326 ++ u16 pi, contig_wqebbs_room;
1327 + int err;
1328 + int i;
1329 +
1330 + pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
1331 +- frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc);
1332 +-
1333 +- if (unlikely(frag_pi + MLX5E_UMR_WQEBBS > mlx5_wq_cyc_get_frag_size(wq))) {
1334 +- mlx5e_fill_icosq_frag_edge(sq, wq, pi, frag_pi);
1335 ++ contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
1336 ++ if (unlikely(contig_wqebbs_room < MLX5E_UMR_WQEBBS)) {
1337 ++ mlx5e_fill_icosq_frag_edge(sq, wq, pi, contig_wqebbs_room);
1338 + pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
1339 + }
1340 +
1341 +@@ -693,43 +691,15 @@ static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth)
1342 + return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6));
1343 + }
1344 +
1345 +-static __be32 mlx5e_get_fcs(struct sk_buff *skb)
1346 ++static u32 mlx5e_get_fcs(const struct sk_buff *skb)
1347 + {
1348 +- int last_frag_sz, bytes_in_prev, nr_frags;
1349 +- u8 *fcs_p1, *fcs_p2;
1350 +- skb_frag_t *last_frag;
1351 +- __be32 fcs_bytes;
1352 +-
1353 +- if (!skb_is_nonlinear(skb))
1354 +- return *(__be32 *)(skb->data + skb->len - ETH_FCS_LEN);
1355 +-
1356 +- nr_frags = skb_shinfo(skb)->nr_frags;
1357 +- last_frag = &skb_shinfo(skb)->frags[nr_frags - 1];
1358 +- last_frag_sz = skb_frag_size(last_frag);
1359 +-
1360 +- /* If all FCS data is in last frag */
1361 +- if (last_frag_sz >= ETH_FCS_LEN)
1362 +- return *(__be32 *)(skb_frag_address(last_frag) +
1363 +- last_frag_sz - ETH_FCS_LEN);
1364 +-
1365 +- fcs_p2 = (u8 *)skb_frag_address(last_frag);
1366 +- bytes_in_prev = ETH_FCS_LEN - last_frag_sz;
1367 +-
1368 +- /* Find where the other part of the FCS is - Linear or another frag */
1369 +- if (nr_frags == 1) {
1370 +- fcs_p1 = skb_tail_pointer(skb);
1371 +- } else {
1372 +- skb_frag_t *prev_frag = &skb_shinfo(skb)->frags[nr_frags - 2];
1373 +-
1374 +- fcs_p1 = skb_frag_address(prev_frag) +
1375 +- skb_frag_size(prev_frag);
1376 +- }
1377 +- fcs_p1 -= bytes_in_prev;
1378 ++ const void *fcs_bytes;
1379 ++ u32 _fcs_bytes;
1380 +
1381 +- memcpy(&fcs_bytes, fcs_p1, bytes_in_prev);
1382 +- memcpy(((u8 *)&fcs_bytes) + bytes_in_prev, fcs_p2, last_frag_sz);
1383 ++ fcs_bytes = skb_header_pointer(skb, skb->len - ETH_FCS_LEN,
1384 ++ ETH_FCS_LEN, &_fcs_bytes);
1385 +
1386 +- return fcs_bytes;
1387 ++ return __get_unaligned_cpu32(fcs_bytes);
1388 + }
1389 +
1390 + static inline void mlx5e_handle_csum(struct net_device *netdev,
1391 +@@ -762,8 +732,9 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
1392 + network_depth - ETH_HLEN,
1393 + skb->csum);
1394 + if (unlikely(netdev->features & NETIF_F_RXFCS))
1395 +- skb->csum = csum_add(skb->csum,
1396 +- (__force __wsum)mlx5e_get_fcs(skb));
1397 ++ skb->csum = csum_block_add(skb->csum,
1398 ++ (__force __wsum)mlx5e_get_fcs(skb),
1399 ++ skb->len - ETH_FCS_LEN);
1400 + stats->csum_complete++;
1401 + return;
1402 + }
1403 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
1404 +index f29deb44bf3b..1e774d979c85 100644
1405 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
1406 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
1407 +@@ -287,10 +287,9 @@ dma_unmap_wqe_err:
1408 +
1409 + static inline void mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq,
1410 + struct mlx5_wq_cyc *wq,
1411 +- u16 pi, u16 frag_pi)
1412 ++ u16 pi, u16 nnops)
1413 + {
1414 + struct mlx5e_tx_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi];
1415 +- u8 nnops = mlx5_wq_cyc_get_frag_size(wq) - frag_pi;
1416 +
1417 + edge_wi = wi + nnops;
1418 +
1419 +@@ -345,8 +344,8 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
1420 + struct mlx5e_tx_wqe_info *wi;
1421 +
1422 + struct mlx5e_sq_stats *stats = sq->stats;
1423 ++ u16 headlen, ihs, contig_wqebbs_room;
1424 + u16 ds_cnt, ds_cnt_inl = 0;
1425 +- u16 headlen, ihs, frag_pi;
1426 + u8 num_wqebbs, opcode;
1427 + u32 num_bytes;
1428 + int num_dma;
1429 +@@ -383,9 +382,9 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
1430 + }
1431 +
1432 + num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
1433 +- frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc);
1434 +- if (unlikely(frag_pi + num_wqebbs > mlx5_wq_cyc_get_frag_size(wq))) {
1435 +- mlx5e_fill_sq_frag_edge(sq, wq, pi, frag_pi);
1436 ++ contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
1437 ++ if (unlikely(contig_wqebbs_room < num_wqebbs)) {
1438 ++ mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
1439 + mlx5e_sq_fetch_wqe(sq, &wqe, &pi);
1440 + }
1441 +
1442 +@@ -629,7 +628,7 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
1443 + struct mlx5e_tx_wqe_info *wi;
1444 +
1445 + struct mlx5e_sq_stats *stats = sq->stats;
1446 +- u16 headlen, ihs, pi, frag_pi;
1447 ++ u16 headlen, ihs, pi, contig_wqebbs_room;
1448 + u16 ds_cnt, ds_cnt_inl = 0;
1449 + u8 num_wqebbs, opcode;
1450 + u32 num_bytes;
1451 +@@ -665,13 +664,14 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
1452 + }
1453 +
1454 + num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
1455 +- frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc);
1456 +- if (unlikely(frag_pi + num_wqebbs > mlx5_wq_cyc_get_frag_size(wq))) {
1457 ++ pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
1458 ++ contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
1459 ++ if (unlikely(contig_wqebbs_room < num_wqebbs)) {
1460 ++ mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
1461 + pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
1462 +- mlx5e_fill_sq_frag_edge(sq, wq, pi, frag_pi);
1463 + }
1464 +
1465 +- mlx5i_sq_fetch_wqe(sq, &wqe, &pi);
1466 ++ mlx5i_sq_fetch_wqe(sq, &wqe, pi);
1467 +
1468 + /* fill wqe */
1469 + wi = &sq->db.wqe_info[pi];
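
Both transmit paths now compute the ring index first and ask how many WQEBBs
remain before the cyclic buffer wraps, padding with NOPs only when the
descriptor cannot fit contiguously; the old code reasoned in fragment-relative
indices instead. Distilled shape of the check (names as in the patch, locals
condensed):

	pi     = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	contig = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);

	if (unlikely(contig < num_wqebbs)) {
		/* Post 'contig' NOP WQEBBs so the real WQE starts at the
		 * beginning of the next contiguous region. */
		mlx5e_fill_sq_frag_edge(sq, wq, pi, contig);
		pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);	/* index moved past the pad */
	}
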
1470 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
1471 +index 406c23862f5f..01ccc8201052 100644
1472 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
1473 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
1474 +@@ -269,7 +269,7 @@ static void eq_pf_process(struct mlx5_eq *eq)
1475 + case MLX5_PFAULT_SUBTYPE_WQE:
1476 + /* WQE based event */
1477 + pfault->type =
1478 +- be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24;
1479 ++ (be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24) & 0x7;
1480 + pfault->token =
1481 + be32_to_cpu(pf_eqe->wqe.token);
1482 + pfault->wqe.wq_num =
1483 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
1484 +index 5645a4facad2..b8ee9101c506 100644
1485 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
1486 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
1487 +@@ -245,7 +245,7 @@ static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev,
1488 + return ERR_PTR(res);
1489 + }
1490 +
1491 +- /* Context will be freed by wait func after completion */
1492 ++ /* Context should be freed by the caller after completion. */
1493 + return context;
1494 + }
1495 +
1496 +@@ -418,10 +418,8 @@ static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags)
1497 + cmd.cmd = htonl(MLX5_FPGA_IPSEC_CMD_OP_SET_CAP);
1498 + cmd.flags = htonl(flags);
1499 + context = mlx5_fpga_ipsec_cmd_exec(mdev, &cmd, sizeof(cmd));
1500 +- if (IS_ERR(context)) {
1501 +- err = PTR_ERR(context);
1502 +- goto out;
1503 +- }
1504 ++ if (IS_ERR(context))
1505 ++ return PTR_ERR(context);
1506 +
1507 + err = mlx5_fpga_ipsec_cmd_wait(context);
1508 + if (err)
1509 +@@ -435,6 +433,7 @@ static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags)
1510 + }
1511 +
1512 + out:
1513 ++ kfree(context);
1514 + return err;
1515 + }
1516 +
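
The net effect here is an ownership change: mlx5_fpga_ipsec_cmd_exec() hands
the command context to the caller unconditionally, and the caller must
kfree() it after waiting, whether the command succeeded or not; the old error
path leaked it when the wait failed. Resulting call shape (a condensed
sketch, response checking elided):

	context = mlx5_fpga_ipsec_cmd_exec(mdev, &cmd, sizeof(cmd));
	if (IS_ERR(context))
		return PTR_ERR(context);	/* nothing was allocated */

	err = mlx5_fpga_ipsec_cmd_wait(context);
	/* ... inspect the response on success ... */
	kfree(context);				/* owned here, success or not */
	return err;
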
1517 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
1518 +index 08eac92fc26c..0982c579ec74 100644
1519 +--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
1520 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
1521 +@@ -109,12 +109,11 @@ struct mlx5i_tx_wqe {
1522 +
1523 + static inline void mlx5i_sq_fetch_wqe(struct mlx5e_txqsq *sq,
1524 + struct mlx5i_tx_wqe **wqe,
1525 +- u16 *pi)
1526 ++ u16 pi)
1527 + {
1528 + struct mlx5_wq_cyc *wq = &sq->wq;
1529 +
1530 +- *pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
1531 +- *wqe = mlx5_wq_cyc_get_wqe(wq, *pi);
1532 ++ *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
1533 + memset(*wqe, 0, sizeof(**wqe));
1534 + }
1535 +
1536 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
1537 +index d838af9539b1..9046475c531c 100644
1538 +--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
1539 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
1540 +@@ -39,11 +39,6 @@ u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
1541 + return (u32)wq->fbc.sz_m1 + 1;
1542 + }
1543 +
1544 +-u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq)
1545 +-{
1546 +- return wq->fbc.frag_sz_m1 + 1;
1547 +-}
1548 +-
1549 + u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
1550 + {
1551 + return wq->fbc.sz_m1 + 1;
1552 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
1553 +index 16476cc1a602..311256554520 100644
1554 +--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
1555 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
1556 +@@ -80,7 +80,6 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
1557 + void *wqc, struct mlx5_wq_cyc *wq,
1558 + struct mlx5_wq_ctrl *wq_ctrl);
1559 + u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
1560 +-u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq);
1561 +
1562 + int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
1563 + void *qpc, struct mlx5_wq_qp *wq,
1564 +@@ -140,11 +139,6 @@ static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr)
1565 + return ctr & wq->fbc.sz_m1;
1566 + }
1567 +
1568 +-static inline u16 mlx5_wq_cyc_ctr2fragix(struct mlx5_wq_cyc *wq, u16 ctr)
1569 +-{
1570 +- return ctr & wq->fbc.frag_sz_m1;
1571 +-}
1572 +-
1573 + static inline u16 mlx5_wq_cyc_get_head(struct mlx5_wq_cyc *wq)
1574 + {
1575 + return mlx5_wq_cyc_ctr2ix(wq, wq->wqe_ctr);
1576 +@@ -160,6 +154,11 @@ static inline void *mlx5_wq_cyc_get_wqe(struct mlx5_wq_cyc *wq, u16 ix)
1577 + return mlx5_frag_buf_get_wqe(&wq->fbc, ix);
1578 + }
1579 +
1580 ++static inline u16 mlx5_wq_cyc_get_contig_wqebbs(struct mlx5_wq_cyc *wq, u16 ix)
1581 ++{
1582 ++ return mlx5_frag_buf_get_idx_last_contig_stride(&wq->fbc, ix) - ix + 1;
1583 ++}
1584 ++
1585 + static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2)
1586 + {
1587 + int equal = (cc1 == cc2);
1588 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
1589 +index f9c724752a32..13636a537f37 100644
1590 +--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
1591 ++++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
1592 +@@ -985,8 +985,8 @@ static int mlxsw_devlink_core_bus_device_reload(struct devlink *devlink,
1593 + mlxsw_core->bus,
1594 + mlxsw_core->bus_priv, true,
1595 + devlink);
1596 +- if (err)
1597 +- mlxsw_core->reload_fail = true;
1598 ++ mlxsw_core->reload_fail = !!err;
1599 ++
1600 + return err;
1601 + }
1602 +
1603 +@@ -1126,8 +1126,15 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
1604 + const char *device_kind = mlxsw_core->bus_info->device_kind;
1605 + struct devlink *devlink = priv_to_devlink(mlxsw_core);
1606 +
1607 +- if (mlxsw_core->reload_fail)
1608 +- goto reload_fail;
1609 ++ if (mlxsw_core->reload_fail) {
1610 ++ if (!reload)
1611 ++ /* Only the parts that were not de-initialized in the
1612 ++ * failed reload attempt need to be de-initialized.
1613 ++ */
1614 ++ goto reload_fail_deinit;
1615 ++ else
1616 ++ return;
1617 ++ }
1618 +
1619 + if (mlxsw_core->driver->fini)
1620 + mlxsw_core->driver->fini(mlxsw_core);
1621 +@@ -1140,9 +1147,12 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
1622 + if (!reload)
1623 + devlink_resources_unregister(devlink, NULL);
1624 + mlxsw_core->bus->fini(mlxsw_core->bus_priv);
1625 +- if (reload)
1626 +- return;
1627 +-reload_fail:
1628 ++
1629 ++ return;
1630 ++
1631 ++reload_fail_deinit:
1632 ++ devlink_unregister(devlink);
1633 ++ devlink_resources_unregister(devlink, NULL);
1634 + devlink_free(devlink);
1635 + mlxsw_core_driver_put(device_kind);
1636 + }
1637 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
1638 +index 6cb43dda8232..9883e48d8a21 100644
1639 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
1640 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
1641 +@@ -2307,8 +2307,6 @@ static void mlxsw_sp_switchdev_event_work(struct work_struct *work)
1642 + break;
1643 + case SWITCHDEV_FDB_DEL_TO_DEVICE:
1644 + fdb_info = &switchdev_work->fdb_info;
1645 +- if (!fdb_info->added_by_user)
1646 +- break;
1647 + mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false);
1648 + break;
1649 + case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
1650 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
1651 +index 90a2b53096e2..51bbb0e5b514 100644
1652 +--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
1653 ++++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
1654 +@@ -1710,7 +1710,7 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
1655 +
1656 + cm_info->local_ip[0] = ntohl(iph->daddr);
1657 + cm_info->remote_ip[0] = ntohl(iph->saddr);
1658 +- cm_info->ip_version = TCP_IPV4;
1659 ++ cm_info->ip_version = QED_TCP_IPV4;
1660 +
1661 + ip_hlen = (iph->ihl) * sizeof(u32);
1662 + *payload_len = ntohs(iph->tot_len) - ip_hlen;
1663 +@@ -1730,7 +1730,7 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
1664 + cm_info->remote_ip[i] =
1665 + ntohl(ip6h->saddr.in6_u.u6_addr32[i]);
1666 + }
1667 +- cm_info->ip_version = TCP_IPV6;
1668 ++ cm_info->ip_version = QED_TCP_IPV6;
1669 +
1670 + ip_hlen = sizeof(*ip6h);
1671 + *payload_len = ntohs(ip6h->payload_len);
1672 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
1673 +index b5ce1581645f..79424e6f0976 100644
1674 +--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
1675 ++++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
1676 +@@ -138,23 +138,16 @@ static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
1677 +
1678 + static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
1679 + {
1680 +- enum roce_flavor flavor;
1681 +-
1682 + switch (roce_mode) {
1683 + case ROCE_V1:
1684 +- flavor = PLAIN_ROCE;
1685 +- break;
1686 ++ return PLAIN_ROCE;
1687 + case ROCE_V2_IPV4:
1688 +- flavor = RROCE_IPV4;
1689 +- break;
1690 ++ return RROCE_IPV4;
1691 + case ROCE_V2_IPV6:
1692 +- flavor = ROCE_V2_IPV6;
1693 +- break;
1694 ++ return RROCE_IPV6;
1695 + default:
1696 +- flavor = MAX_ROCE_MODE;
1697 +- break;
1698 ++ return MAX_ROCE_FLAVOR;
1699 + }
1700 +- return flavor;
1701 + }
1702 +
1703 + void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
1704 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
1705 +index 8de644b4721e..77b6248ad3b9 100644
1706 +--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
1707 ++++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
1708 +@@ -154,7 +154,7 @@ qed_set_pf_update_tunn_mode(struct qed_tunnel_info *p_tun,
1709 + static void qed_set_tunn_cls_info(struct qed_tunnel_info *p_tun,
1710 + struct qed_tunnel_info *p_src)
1711 + {
1712 +- enum tunnel_clss type;
1713 ++ int type;
1714 +
1715 + p_tun->b_update_rx_cls = p_src->b_update_rx_cls;
1716 + p_tun->b_update_tx_cls = p_src->b_update_tx_cls;
1717 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
1718 +index be6ddde1a104..c4766e4ac485 100644
1719 +--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
1720 ++++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
1721 +@@ -413,7 +413,6 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
1722 + }
1723 +
1724 + if (!p_iov->b_pre_fp_hsi &&
1725 +- ETH_HSI_VER_MINOR &&
1726 + (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
1727 + DP_INFO(p_hwfn,
1728 + "PF is using older fastpath HSI; %02x.%02x is configured\n",
1729 +@@ -572,7 +571,7 @@ free_p_iov:
1730 + static void
1731 + __qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
1732 + struct qed_tunn_update_type *p_src,
1733 +- enum qed_tunn_clss mask, u8 *p_cls)
1734 ++ enum qed_tunn_mode mask, u8 *p_cls)
1735 + {
1736 + if (p_src->b_update_mode) {
1737 + p_req->tun_mode_update_mask |= BIT(mask);
1738 +@@ -587,7 +586,7 @@ __qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
1739 + static void
1740 + qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
1741 + struct qed_tunn_update_type *p_src,
1742 +- enum qed_tunn_clss mask,
1743 ++ enum qed_tunn_mode mask,
1744 + u8 *p_cls, struct qed_tunn_update_udp_port *p_port,
1745 + u8 *p_update_port, u16 *p_udp_port)
1746 + {
1747 +diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
1748 +index 627c5cd8f786..f18087102d40 100644
1749 +--- a/drivers/net/ethernet/realtek/r8169.c
1750 ++++ b/drivers/net/ethernet/realtek/r8169.c
1751 +@@ -7044,17 +7044,15 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
1752 + struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
1753 + struct net_device *dev = tp->dev;
1754 + u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow;
1755 +- int work_done= 0;
1756 ++ int work_done;
1757 + u16 status;
1758 +
1759 + status = rtl_get_events(tp);
1760 + rtl_ack_events(tp, status & ~tp->event_slow);
1761 +
1762 +- if (status & RTL_EVENT_NAPI_RX)
1763 +- work_done = rtl_rx(dev, tp, (u32) budget);
1764 ++ work_done = rtl_rx(dev, tp, (u32) budget);
1765 +
1766 +- if (status & RTL_EVENT_NAPI_TX)
1767 +- rtl_tx(dev, tp);
1768 ++ rtl_tx(dev, tp);
1769 +
1770 + if (status & tp->event_slow) {
1771 + enable_mask &= ~tp->event_slow;
1772 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
1773 +index 5df1a608e566..541602d70c24 100644
1774 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
1775 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
1776 +@@ -133,7 +133,7 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
1777 + */
1778 + int stmmac_mdio_reset(struct mii_bus *bus)
1779 + {
1780 +-#if defined(CONFIG_STMMAC_PLATFORM)
1781 ++#if IS_ENABLED(CONFIG_STMMAC_PLATFORM)
1782 + struct net_device *ndev = bus->priv;
1783 + struct stmmac_priv *priv = netdev_priv(ndev);
1784 + unsigned int mii_address = priv->hw->mii.addr;
1785 +diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
1786 +index 16ec7af6ab7b..ba9df430fca6 100644
1787 +--- a/drivers/net/hamradio/yam.c
1788 ++++ b/drivers/net/hamradio/yam.c
1789 +@@ -966,6 +966,8 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1790 + sizeof(struct yamdrv_ioctl_mcs));
1791 + if (IS_ERR(ym))
1792 + return PTR_ERR(ym);
1793 ++ if (ym->cmd != SIOCYAMSMCS)
1794 ++ return -EINVAL;
1795 + if (ym->bitrate > YAM_MAXBITRATE) {
1796 + kfree(ym);
1797 + return -EINVAL;
1798 +@@ -981,6 +983,8 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1799 + if (copy_from_user(&yi, ifr->ifr_data, sizeof(struct yamdrv_ioctl_cfg)))
1800 + return -EFAULT;
1801 +
1802 ++ if (yi.cmd != SIOCYAMSCFG)
1803 ++ return -EINVAL;
1804 + if ((yi.cfg.mask & YAM_IOBASE) && netif_running(dev))
1805 + return -EINVAL; /* Cannot change this parameter when up */
1806 + if ((yi.cfg.mask & YAM_IRQ) && netif_running(dev))
1807 +diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
1808 +index e95dd12edec4..023b8d0bf175 100644
1809 +--- a/drivers/net/usb/asix_common.c
1810 ++++ b/drivers/net/usb/asix_common.c
1811 +@@ -607,6 +607,9 @@ int asix_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
1812 + struct usbnet *dev = netdev_priv(net);
1813 + u8 opt = 0;
1814 +
1815 ++ if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
1816 ++ return -EINVAL;
1817 ++
1818 + if (wolinfo->wolopts & WAKE_PHY)
1819 + opt |= AX_MONITOR_LINK;
1820 + if (wolinfo->wolopts & WAKE_MAGIC)
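
This guard opens a series: ax88179_178a, lan78xx, r8152, smsc75xx, smsc95xx
and sr9800 below all gain the same check. The point is to fail a WAKE_*
request the hardware cannot honour instead of silently masking it off, so
"ethtool -s <dev> wol ..." reports -EINVAL rather than pretending to arm an
unsupported wake source. Distilled pattern (the driver name is a placeholder;
each driver substitutes its own supported mask):

	static int foo_set_wol(struct net_device *net,
			       struct ethtool_wolinfo *wolinfo)
	{
		if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
			return -EINVAL;	/* unsupported wake bit requested */

		/* ... program only the bits the device implements ... */
		return 0;
	}
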
1821 +diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
1822 +index 9e8ad372f419..2207f7a7d1ff 100644
1823 +--- a/drivers/net/usb/ax88179_178a.c
1824 ++++ b/drivers/net/usb/ax88179_178a.c
1825 +@@ -566,6 +566,9 @@ ax88179_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
1826 + struct usbnet *dev = netdev_priv(net);
1827 + u8 opt = 0;
1828 +
1829 ++ if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
1830 ++ return -EINVAL;
1831 ++
1832 + if (wolinfo->wolopts & WAKE_PHY)
1833 + opt |= AX_MONITOR_MODE_RWLC;
1834 + if (wolinfo->wolopts & WAKE_MAGIC)
1835 +diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
1836 +index aeca484a75b8..2bb3a081ff10 100644
1837 +--- a/drivers/net/usb/lan78xx.c
1838 ++++ b/drivers/net/usb/lan78xx.c
1839 +@@ -1401,19 +1401,10 @@ static int lan78xx_set_wol(struct net_device *netdev,
1840 + if (ret < 0)
1841 + return ret;
1842 +
1843 +- pdata->wol = 0;
1844 +- if (wol->wolopts & WAKE_UCAST)
1845 +- pdata->wol |= WAKE_UCAST;
1846 +- if (wol->wolopts & WAKE_MCAST)
1847 +- pdata->wol |= WAKE_MCAST;
1848 +- if (wol->wolopts & WAKE_BCAST)
1849 +- pdata->wol |= WAKE_BCAST;
1850 +- if (wol->wolopts & WAKE_MAGIC)
1851 +- pdata->wol |= WAKE_MAGIC;
1852 +- if (wol->wolopts & WAKE_PHY)
1853 +- pdata->wol |= WAKE_PHY;
1854 +- if (wol->wolopts & WAKE_ARP)
1855 +- pdata->wol |= WAKE_ARP;
1856 ++ if (wol->wolopts & ~WAKE_ALL)
1857 ++ return -EINVAL;
1858 ++
1859 ++ pdata->wol = wol->wolopts;
1860 +
1861 + device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1862 +
1863 +diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
1864 +index 1b07bb5e110d..9a55d75f7f10 100644
1865 +--- a/drivers/net/usb/r8152.c
1866 ++++ b/drivers/net/usb/r8152.c
1867 +@@ -4503,6 +4503,9 @@ static int rtl8152_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1868 + if (!rtl_can_wakeup(tp))
1869 + return -EOPNOTSUPP;
1870 +
1871 ++ if (wol->wolopts & ~WAKE_ANY)
1872 ++ return -EINVAL;
1873 ++
1874 + ret = usb_autopm_get_interface(tp->intf);
1875 + if (ret < 0)
1876 + goto out_set_wol;
1877 +diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
1878 +index b64b1ee56d2d..ec287c9741e8 100644
1879 +--- a/drivers/net/usb/smsc75xx.c
1880 ++++ b/drivers/net/usb/smsc75xx.c
1881 +@@ -731,6 +731,9 @@ static int smsc75xx_ethtool_set_wol(struct net_device *net,
1882 + struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
1883 + int ret;
1884 +
1885 ++ if (wolinfo->wolopts & ~SUPPORTED_WAKE)
1886 ++ return -EINVAL;
1887 ++
1888 + pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;
1889 +
1890 + ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts);
1891 +diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
1892 +index 06b4d290784d..262e7a3c23cb 100644
1893 +--- a/drivers/net/usb/smsc95xx.c
1894 ++++ b/drivers/net/usb/smsc95xx.c
1895 +@@ -774,6 +774,9 @@ static int smsc95xx_ethtool_set_wol(struct net_device *net,
1896 + struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
1897 + int ret;
1898 +
1899 ++ if (wolinfo->wolopts & ~SUPPORTED_WAKE)
1900 ++ return -EINVAL;
1901 ++
1902 + pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;
1903 +
1904 + ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts);
1905 +diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
1906 +index 9277a0f228df..35f39f23d881 100644
1907 +--- a/drivers/net/usb/sr9800.c
1908 ++++ b/drivers/net/usb/sr9800.c
1909 +@@ -421,6 +421,9 @@ sr_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
1910 + struct usbnet *dev = netdev_priv(net);
1911 + u8 opt = 0;
1912 +
1913 ++ if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
1914 ++ return -EINVAL;
1915 ++
1916 + if (wolinfo->wolopts & WAKE_PHY)
1917 + opt |= SR_MONITOR_LINK;
1918 + if (wolinfo->wolopts & WAKE_MAGIC)
1919 +diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
1920 +index 2b6ec927809e..500e2d8f10bc 100644
1921 +--- a/drivers/net/virtio_net.c
1922 ++++ b/drivers/net/virtio_net.c
1923 +@@ -2162,8 +2162,9 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
1924 + /* Make sure no work handler is accessing the device */
1925 + flush_work(&vi->config_work);
1926 +
1927 ++ netif_tx_lock_bh(vi->dev);
1928 + netif_device_detach(vi->dev);
1929 +- netif_tx_disable(vi->dev);
1930 ++ netif_tx_unlock_bh(vi->dev);
1931 + cancel_delayed_work_sync(&vi->refill);
1932 +
1933 + if (netif_running(vi->dev)) {
1934 +@@ -2199,7 +2200,9 @@ static int virtnet_restore_up(struct virtio_device *vdev)
1935 + }
1936 + }
1937 +
1938 ++ netif_tx_lock_bh(vi->dev);
1939 + netif_device_attach(vi->dev);
1940 ++ netif_tx_unlock_bh(vi->dev);
1941 + return err;
1942 + }
1943 +
1944 +diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
1945 +index 80e2c8595c7c..58dd217811c8 100644
1946 +--- a/drivers/net/wireless/mac80211_hwsim.c
1947 ++++ b/drivers/net/wireless/mac80211_hwsim.c
1948 +@@ -519,7 +519,6 @@ struct mac80211_hwsim_data {
1949 + int channels, idx;
1950 + bool use_chanctx;
1951 + bool destroy_on_close;
1952 +- struct work_struct destroy_work;
1953 + u32 portid;
1954 + char alpha2[2];
1955 + const struct ieee80211_regdomain *regd;
1956 +@@ -2812,8 +2811,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
1957 + hwsim_radios_generation++;
1958 + spin_unlock_bh(&hwsim_radio_lock);
1959 +
1960 +- if (idx > 0)
1961 +- hwsim_mcast_new_radio(idx, info, param);
1962 ++ hwsim_mcast_new_radio(idx, info, param);
1963 +
1964 + return idx;
1965 +
1966 +@@ -3442,30 +3440,27 @@ static struct genl_family hwsim_genl_family __ro_after_init = {
1967 + .n_mcgrps = ARRAY_SIZE(hwsim_mcgrps),
1968 + };
1969 +
1970 +-static void destroy_radio(struct work_struct *work)
1971 +-{
1972 +- struct mac80211_hwsim_data *data =
1973 +- container_of(work, struct mac80211_hwsim_data, destroy_work);
1974 +-
1975 +- hwsim_radios_generation++;
1976 +- mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy), NULL);
1977 +-}
1978 +-
1979 + static void remove_user_radios(u32 portid)
1980 + {
1981 + struct mac80211_hwsim_data *entry, *tmp;
1982 ++ LIST_HEAD(list);
1983 +
1984 + spin_lock_bh(&hwsim_radio_lock);
1985 + list_for_each_entry_safe(entry, tmp, &hwsim_radios, list) {
1986 + if (entry->destroy_on_close && entry->portid == portid) {
1987 +- list_del(&entry->list);
1988 ++ list_move(&entry->list, &list);
1989 + rhashtable_remove_fast(&hwsim_radios_rht, &entry->rht,
1990 + hwsim_rht_params);
1991 +- INIT_WORK(&entry->destroy_work, destroy_radio);
1992 +- queue_work(hwsim_wq, &entry->destroy_work);
1993 ++ hwsim_radios_generation++;
1994 + }
1995 + }
1996 + spin_unlock_bh(&hwsim_radio_lock);
1997 ++
1998 ++ list_for_each_entry_safe(entry, tmp, &list, list) {
1999 ++ list_del(&entry->list);
2000 ++ mac80211_hwsim_del_radio(entry, wiphy_name(entry->hw->wiphy),
2001 ++ NULL);
2002 ++ }
2003 + }
2004 +
2005 + static int mac80211_hwsim_netlink_notify(struct notifier_block *nb,
2006 +@@ -3523,6 +3518,7 @@ static __net_init int hwsim_init_net(struct net *net)
2007 + static void __net_exit hwsim_exit_net(struct net *net)
2008 + {
2009 + struct mac80211_hwsim_data *data, *tmp;
2010 ++ LIST_HEAD(list);
2011 +
2012 + spin_lock_bh(&hwsim_radio_lock);
2013 + list_for_each_entry_safe(data, tmp, &hwsim_radios, list) {
2014 +@@ -3533,17 +3529,19 @@ static void __net_exit hwsim_exit_net(struct net *net)
2015 + if (data->netgroup == hwsim_net_get_netgroup(&init_net))
2016 + continue;
2017 +
2018 +- list_del(&data->list);
2019 ++ list_move(&data->list, &list);
2020 + rhashtable_remove_fast(&hwsim_radios_rht, &data->rht,
2021 + hwsim_rht_params);
2022 + hwsim_radios_generation++;
2023 +- spin_unlock_bh(&hwsim_radio_lock);
2024 ++ }
2025 ++ spin_unlock_bh(&hwsim_radio_lock);
2026 ++
2027 ++ list_for_each_entry_safe(data, tmp, &list, list) {
2028 ++ list_del(&data->list);
2029 + mac80211_hwsim_del_radio(data,
2030 + wiphy_name(data->hw->wiphy),
2031 + NULL);
2032 +- spin_lock_bh(&hwsim_radio_lock);
2033 + }
2034 +- spin_unlock_bh(&hwsim_radio_lock);
2035 +
2036 + ida_simple_remove(&hwsim_netgroup_ida, hwsim_net_get_netgroup(net));
2037 + }
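
Both hwsim teardown paths above switch to the same idiom: entries are
unlinked onto a private list while hwsim_radio_lock is held, and
mac80211_hwsim_del_radio(), which can sleep, runs only after the lock is
dropped. That replaces the per-entry workqueue bounce in remove_user_radios()
and the unlock/relock dance in hwsim_exit_net(). The generic shape, with
placeholder names:

	LIST_HEAD(doomed);

	spin_lock_bh(&lock);
	list_for_each_entry_safe(obj, tmp, &live, list)
		if (should_remove(obj))
			list_move(&obj->list, &doomed);	/* unlink, keep node */
	spin_unlock_bh(&lock);

	list_for_each_entry_safe(obj, tmp, &doomed, list) {
		list_del(&obj->list);
		destroy(obj);			/* may sleep: no lock held */
	}
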
2038 +diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c
2039 +index 43743c26c071..39bf85d0ade0 100644
2040 +--- a/drivers/net/wireless/marvell/libertas/if_sdio.c
2041 ++++ b/drivers/net/wireless/marvell/libertas/if_sdio.c
2042 +@@ -1317,6 +1317,10 @@ static int if_sdio_suspend(struct device *dev)
2043 + if (priv->wol_criteria == EHS_REMOVE_WAKEUP) {
2044 + dev_info(dev, "Suspend without wake params -- powering down card\n");
2045 + if (priv->fw_ready) {
2046 ++ ret = lbs_suspend(priv);
2047 ++ if (ret)
2048 ++ return ret;
2049 ++
2050 + priv->power_up_on_resume = true;
2051 + if_sdio_power_off(card);
2052 + }
2053 +diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
2054 +index 3e18a68c2b03..054e66d93ed6 100644
2055 +--- a/drivers/scsi/qedi/qedi_main.c
2056 ++++ b/drivers/scsi/qedi/qedi_main.c
2057 +@@ -2472,6 +2472,7 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
2058 + /* start qedi context */
2059 + spin_lock_init(&qedi->hba_lock);
2060 + spin_lock_init(&qedi->task_idx_lock);
2061 ++ mutex_init(&qedi->stats_lock);
2062 + }
2063 + qedi_ops->ll2->register_cb_ops(qedi->cdev, &qedi_ll2_cb_ops, qedi);
2064 + qedi_ops->ll2->start(qedi->cdev, &params);
2065 +diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
2066 +index ecb22749df0b..8cc015183043 100644
2067 +--- a/drivers/soc/fsl/qbman/qman.c
2068 ++++ b/drivers/soc/fsl/qbman/qman.c
2069 +@@ -2729,6 +2729,9 @@ static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt)
2070 + {
2071 + unsigned long addr;
2072 +
2073 ++ if (!p)
2074 ++ return -ENODEV;
2075 ++
2076 + addr = gen_pool_alloc(p, cnt);
2077 + if (!addr)
2078 + return -ENOMEM;
2079 +diff --git a/drivers/soc/fsl/qe/ucc.c b/drivers/soc/fsl/qe/ucc.c
2080 +index c646d8713861..681f7d4b7724 100644
2081 +--- a/drivers/soc/fsl/qe/ucc.c
2082 ++++ b/drivers/soc/fsl/qe/ucc.c
2083 +@@ -626,7 +626,7 @@ static u32 ucc_get_tdm_sync_shift(enum comm_dir mode, u32 tdm_num)
2084 + {
2085 + u32 shift;
2086 +
2087 +- shift = (mode == COMM_DIR_RX) ? RX_SYNC_SHIFT_BASE : RX_SYNC_SHIFT_BASE;
2088 ++ shift = (mode == COMM_DIR_RX) ? RX_SYNC_SHIFT_BASE : TX_SYNC_SHIFT_BASE;
2089 + shift -= tdm_num * 2;
2090 +
2091 + return shift;
2092 +diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
2093 +index 500911f16498..5bad9fdec5f8 100644
2094 +--- a/drivers/thunderbolt/icm.c
2095 ++++ b/drivers/thunderbolt/icm.c
2096 +@@ -653,14 +653,6 @@ icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
2097 + bool approved;
2098 + u64 route;
2099 +
2100 +- /*
2101 +- * After NVM upgrade adding root switch device fails because we
2102 +- * initiated reset. During that time ICM might still send
2103 +- * XDomain connected message which we ignore here.
2104 +- */
2105 +- if (!tb->root_switch)
2106 +- return;
2107 +-
2108 + link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
2109 + depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
2110 + ICM_LINK_INFO_DEPTH_SHIFT;
2111 +@@ -950,14 +942,6 @@ icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
2112 + if (pkg->hdr.packet_id)
2113 + return;
2114 +
2115 +- /*
2116 +- * After NVM upgrade adding root switch device fails because we
2117 +- * initiated reset. During that time ICM might still send device
2118 +- * connected message which we ignore here.
2119 +- */
2120 +- if (!tb->root_switch)
2121 +- return;
2122 +-
2123 + route = get_route(pkg->route_hi, pkg->route_lo);
2124 + authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
2125 + security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
2126 +@@ -1317,19 +1301,26 @@ static void icm_handle_notification(struct work_struct *work)
2127 +
2128 + mutex_lock(&tb->lock);
2129 +
2130 +- switch (n->pkg->code) {
2131 +- case ICM_EVENT_DEVICE_CONNECTED:
2132 +- icm->device_connected(tb, n->pkg);
2133 +- break;
2134 +- case ICM_EVENT_DEVICE_DISCONNECTED:
2135 +- icm->device_disconnected(tb, n->pkg);
2136 +- break;
2137 +- case ICM_EVENT_XDOMAIN_CONNECTED:
2138 +- icm->xdomain_connected(tb, n->pkg);
2139 +- break;
2140 +- case ICM_EVENT_XDOMAIN_DISCONNECTED:
2141 +- icm->xdomain_disconnected(tb, n->pkg);
2142 +- break;
2143 ++ /*
2144 ++ * When the domain is stopped we flush its workqueue but before
2145 ++ * that the root switch is removed. In that case we should treat
2146 ++ * the queued events as being canceled.
2147 ++ */
2148 ++ if (tb->root_switch) {
2149 ++ switch (n->pkg->code) {
2150 ++ case ICM_EVENT_DEVICE_CONNECTED:
2151 ++ icm->device_connected(tb, n->pkg);
2152 ++ break;
2153 ++ case ICM_EVENT_DEVICE_DISCONNECTED:
2154 ++ icm->device_disconnected(tb, n->pkg);
2155 ++ break;
2156 ++ case ICM_EVENT_XDOMAIN_CONNECTED:
2157 ++ icm->xdomain_connected(tb, n->pkg);
2158 ++ break;
2159 ++ case ICM_EVENT_XDOMAIN_DISCONNECTED:
2160 ++ icm->xdomain_disconnected(tb, n->pkg);
2161 ++ break;
2162 ++ }
2163 + }
2164 +
2165 + mutex_unlock(&tb->lock);
2166 +diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
2167 +index f5a33e88e676..2d042150e41c 100644
2168 +--- a/drivers/thunderbolt/nhi.c
2169 ++++ b/drivers/thunderbolt/nhi.c
2170 +@@ -1147,5 +1147,5 @@ static void __exit nhi_unload(void)
2171 + tb_domain_exit();
2172 + }
2173 +
2174 +-fs_initcall(nhi_init);
2175 ++rootfs_initcall(nhi_init);
2176 + module_exit(nhi_unload);
2177 +diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
2178 +index af842000188c..a25f6ea5c784 100644
2179 +--- a/drivers/tty/serial/8250/8250_dw.c
2180 ++++ b/drivers/tty/serial/8250/8250_dw.c
2181 +@@ -576,10 +576,6 @@ static int dw8250_probe(struct platform_device *pdev)
2182 + if (!data->skip_autocfg)
2183 + dw8250_setup_port(p);
2184 +
2185 +-#ifdef CONFIG_PM
2186 +- uart.capabilities |= UART_CAP_RPM;
2187 +-#endif
2188 +-
2189 + /* If we have a valid fifosize, try hooking up DMA */
2190 + if (p->fifosize) {
2191 + data->dma.rxconf.src_maxburst = p->fifosize / 4;
2192 +diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
2193 +index 560ed8711706..c4424cbd9943 100644
2194 +--- a/drivers/vhost/vhost.c
2195 ++++ b/drivers/vhost/vhost.c
2196 +@@ -30,6 +30,7 @@
2197 + #include <linux/sched/mm.h>
2198 + #include <linux/sched/signal.h>
2199 + #include <linux/interval_tree_generic.h>
2200 ++#include <linux/nospec.h>
2201 +
2202 + #include "vhost.h"
2203 +
2204 +@@ -1362,6 +1363,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
2205 + if (idx >= d->nvqs)
2206 + return -ENOBUFS;
2207 +
2208 ++ idx = array_index_nospec(idx, d->nvqs);
2209 + vq = d->vqs[idx];
2210 +
2211 + mutex_lock(&vq->mutex);
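
The vhost change is the standard Spectre-v1 mitigation: the bounds check by
itself does not stop a mispredicted branch from speculatively indexing
d->vqs[] out of range, so the index is clamped after the check and before the
dereference:

	#include <linux/nospec.h>

	if (idx >= d->nvqs)
		return -ENOBUFS;
	/* Forces idx < d->nvqs even under branch misspeculation. */
	idx = array_index_nospec(idx, d->nvqs);
	vq = d->vqs[idx];
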
2212 +diff --git a/drivers/video/fbdev/pxa168fb.c b/drivers/video/fbdev/pxa168fb.c
2213 +index def3a501acd6..d059d04c63ac 100644
2214 +--- a/drivers/video/fbdev/pxa168fb.c
2215 ++++ b/drivers/video/fbdev/pxa168fb.c
2216 +@@ -712,7 +712,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
2217 + /*
2218 + * enable controller clock
2219 + */
2220 +- clk_enable(fbi->clk);
2221 ++ clk_prepare_enable(fbi->clk);
2222 +
2223 + pxa168fb_set_par(info);
2224 +
2225 +@@ -767,7 +767,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
2226 + failed_free_cmap:
2227 + fb_dealloc_cmap(&info->cmap);
2228 + failed_free_clk:
2229 +- clk_disable(fbi->clk);
2230 ++ clk_disable_unprepare(fbi->clk);
2231 + failed_free_fbmem:
2232 + dma_free_coherent(fbi->dev, info->fix.smem_len,
2233 + info->screen_base, fbi->fb_start_dma);
2234 +@@ -807,7 +807,7 @@ static int pxa168fb_remove(struct platform_device *pdev)
2235 + dma_free_wc(fbi->dev, PAGE_ALIGN(info->fix.smem_len),
2236 + info->screen_base, info->fix.smem_start);
2237 +
2238 +- clk_disable(fbi->clk);
2239 ++ clk_disable_unprepare(fbi->clk);
2240 +
2241 + framebuffer_release(info);
2242 +
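
Under the common clock framework a clock must be prepared (which may sleep)
before it is enabled (which must not), and clk_enable() on an unprepared
clock may leave it off entirely. Probe and both teardown paths therefore move
to the combined helpers, which keep the two refcounts balanced:

	ret = clk_prepare_enable(fbi->clk);	/* prepare + enable in one go */
	if (ret)
		return ret;
	/* ... controller in use ... */
	clk_disable_unprepare(fbi->clk);	/* exact mirror on teardown */
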
2243 +diff --git a/fs/afs/cell.c b/fs/afs/cell.c
2244 +index f3d0bef16d78..6127f0fcd62c 100644
2245 +--- a/fs/afs/cell.c
2246 ++++ b/fs/afs/cell.c
2247 +@@ -514,6 +514,8 @@ static int afs_alloc_anon_key(struct afs_cell *cell)
2248 + */
2249 + static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell)
2250 + {
2251 ++ struct hlist_node **p;
2252 ++ struct afs_cell *pcell;
2253 + int ret;
2254 +
2255 + if (!cell->anonymous_key) {
2256 +@@ -534,7 +536,18 @@ static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell)
2257 + return ret;
2258 +
2259 + mutex_lock(&net->proc_cells_lock);
2260 +- list_add_tail(&cell->proc_link, &net->proc_cells);
2261 ++ for (p = &net->proc_cells.first; *p; p = &(*p)->next) {
2262 ++ pcell = hlist_entry(*p, struct afs_cell, proc_link);
2263 ++ if (strcmp(cell->name, pcell->name) < 0)
2264 ++ break;
2265 ++ }
2266 ++
2267 ++ cell->proc_link.pprev = p;
2268 ++ cell->proc_link.next = *p;
2269 ++ rcu_assign_pointer(*p, &cell->proc_link.next);
2270 ++ if (cell->proc_link.next)
2271 ++ cell->proc_link.next->pprev = &cell->proc_link.next;
2272 ++
2273 + afs_dynroot_mkdir(net, cell);
2274 + mutex_unlock(&net->proc_cells_lock);
2275 + return 0;
2276 +@@ -550,7 +563,7 @@ static void afs_deactivate_cell(struct afs_net *net, struct afs_cell *cell)
2277 + afs_proc_cell_remove(cell);
2278 +
2279 + mutex_lock(&net->proc_cells_lock);
2280 +- list_del_init(&cell->proc_link);
2281 ++ hlist_del_rcu(&cell->proc_link);
2282 + afs_dynroot_rmdir(net, cell);
2283 + mutex_unlock(&net->proc_cells_lock);
2284 +
2285 +diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
2286 +index 174e843f0633..7de7223843cc 100644
2287 +--- a/fs/afs/dynroot.c
2288 ++++ b/fs/afs/dynroot.c
2289 +@@ -286,7 +286,7 @@ int afs_dynroot_populate(struct super_block *sb)
2290 + return -ERESTARTSYS;
2291 +
2292 + net->dynroot_sb = sb;
2293 +- list_for_each_entry(cell, &net->proc_cells, proc_link) {
2294 ++ hlist_for_each_entry(cell, &net->proc_cells, proc_link) {
2295 + ret = afs_dynroot_mkdir(net, cell);
2296 + if (ret < 0)
2297 + goto error;
2298 +diff --git a/fs/afs/internal.h b/fs/afs/internal.h
2299 +index 9778df135717..270d1caa27c6 100644
2300 +--- a/fs/afs/internal.h
2301 ++++ b/fs/afs/internal.h
2302 +@@ -241,7 +241,7 @@ struct afs_net {
2303 + seqlock_t cells_lock;
2304 +
2305 + struct mutex proc_cells_lock;
2306 +- struct list_head proc_cells;
2307 ++ struct hlist_head proc_cells;
2308 +
2309 + /* Known servers. Theoretically each fileserver can only be in one
2310 + * cell, but in practice, people create aliases and subsets and there's
2311 +@@ -319,7 +319,7 @@ struct afs_cell {
2312 + struct afs_net *net;
2313 + struct key *anonymous_key; /* anonymous user key for this cell */
2314 + struct work_struct manager; /* Manager for init/deinit/dns */
2315 +- struct list_head proc_link; /* /proc cell list link */
2316 ++ struct hlist_node proc_link; /* /proc cell list link */
2317 + #ifdef CONFIG_AFS_FSCACHE
2318 + struct fscache_cookie *cache; /* caching cookie */
2319 + #endif
2320 +diff --git a/fs/afs/main.c b/fs/afs/main.c
2321 +index e84fe822a960..107427688edd 100644
2322 +--- a/fs/afs/main.c
2323 ++++ b/fs/afs/main.c
2324 +@@ -87,7 +87,7 @@ static int __net_init afs_net_init(struct net *net_ns)
2325 + timer_setup(&net->cells_timer, afs_cells_timer, 0);
2326 +
2327 + mutex_init(&net->proc_cells_lock);
2328 +- INIT_LIST_HEAD(&net->proc_cells);
2329 ++ INIT_HLIST_HEAD(&net->proc_cells);
2330 +
2331 + seqlock_init(&net->fs_lock);
2332 + net->fs_servers = RB_ROOT;
2333 +diff --git a/fs/afs/proc.c b/fs/afs/proc.c
2334 +index 476dcbb79713..9101f62707af 100644
2335 +--- a/fs/afs/proc.c
2336 ++++ b/fs/afs/proc.c
2337 +@@ -33,9 +33,8 @@ static inline struct afs_net *afs_seq2net_single(struct seq_file *m)
2338 + static int afs_proc_cells_show(struct seq_file *m, void *v)
2339 + {
2340 + struct afs_cell *cell = list_entry(v, struct afs_cell, proc_link);
2341 +- struct afs_net *net = afs_seq2net(m);
2342 +
2343 +- if (v == &net->proc_cells) {
2344 ++ if (v == SEQ_START_TOKEN) {
2345 + /* display header on line 1 */
2346 + seq_puts(m, "USE NAME\n");
2347 + return 0;
2348 +@@ -50,12 +49,12 @@ static void *afs_proc_cells_start(struct seq_file *m, loff_t *_pos)
2349 + __acquires(rcu)
2350 + {
2351 + rcu_read_lock();
2352 +- return seq_list_start_head(&afs_seq2net(m)->proc_cells, *_pos);
2353 ++ return seq_hlist_start_head_rcu(&afs_seq2net(m)->proc_cells, *_pos);
2354 + }
2355 +
2356 + static void *afs_proc_cells_next(struct seq_file *m, void *v, loff_t *pos)
2357 + {
2358 +- return seq_list_next(v, &afs_seq2net(m)->proc_cells, pos);
2359 ++ return seq_hlist_next_rcu(v, &afs_seq2net(m)->proc_cells, pos);
2360 + }
2361 +
2362 + static void afs_proc_cells_stop(struct seq_file *m, void *v)
2363 +diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
2364 +index 3aef8630a4b9..95d2c716e0da 100644
2365 +--- a/fs/fat/fatent.c
2366 ++++ b/fs/fat/fatent.c
2367 +@@ -681,6 +681,7 @@ int fat_count_free_clusters(struct super_block *sb)
2368 + if (ops->ent_get(&fatent) == FAT_ENT_FREE)
2369 + free++;
2370 + } while (fat_ent_next(sbi, &fatent));
2371 ++ cond_resched();
2372 + }
2373 + sbi->free_clusters = free;
2374 + sbi->free_clus_valid = 1;
2375 +diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
2376 +index 7869622af22a..7a5ee145c733 100644
2377 +--- a/fs/ocfs2/refcounttree.c
2378 ++++ b/fs/ocfs2/refcounttree.c
2379 +@@ -2946,6 +2946,7 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
2380 + if (map_end & (PAGE_SIZE - 1))
2381 + to = map_end & (PAGE_SIZE - 1);
2382 +
2383 ++retry:
2384 + page = find_or_create_page(mapping, page_index, GFP_NOFS);
2385 + if (!page) {
2386 + ret = -ENOMEM;
2387 +@@ -2954,11 +2955,18 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
2388 + }
2389 +
2390 + /*
2391 +- * In case PAGE_SIZE <= CLUSTER_SIZE, This page
2392 +- * can't be dirtied before we CoW it out.
2393 ++ * In case PAGE_SIZE <= CLUSTER_SIZE, we do not expect a dirty
2394 ++ * page, so write it back.
2395 + */
2396 +- if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize)
2397 +- BUG_ON(PageDirty(page));
2398 ++ if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize) {
2399 ++ if (PageDirty(page)) {
2400 ++ /*
2401 ++ * write_one_page() will unlock the page on return
2402 ++ */
2403 ++ ret = write_one_page(page);
2404 ++ goto retry;
2405 ++ }
2406 ++ }
2407 +
2408 + if (!PageUptodate(page)) {
2409 + ret = block_read_full_page(page, ocfs2_get_block);
2410 +diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
2411 +index e373e2e10f6a..83b930988e21 100644
2412 +--- a/include/asm-generic/vmlinux.lds.h
2413 ++++ b/include/asm-generic/vmlinux.lds.h
2414 +@@ -70,7 +70,7 @@
2415 + */
2416 + #ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
2417 + #define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
2418 +-#define DATA_MAIN .data .data.[0-9a-zA-Z_]*
2419 ++#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..LPBX*
2420 + #define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]*
2421 + #define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]*
2422 + #define BSS_MAIN .bss .bss.[0-9a-zA-Z_]*
2423 +@@ -617,8 +617,8 @@
2424 +
2425 + #define EXIT_DATA \
2426 + *(.exit.data .exit.data.*) \
2427 +- *(.fini_array) \
2428 +- *(.dtors) \
2429 ++ *(.fini_array .fini_array.*) \
2430 ++ *(.dtors .dtors.*) \
2431 + MEM_DISCARD(exit.data*) \
2432 + MEM_DISCARD(exit.rodata*)
2433 +
2434 +diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
2435 +index a8ba6b04152c..55e4be8b016b 100644
2436 +--- a/include/linux/compiler_types.h
2437 ++++ b/include/linux/compiler_types.h
2438 +@@ -78,6 +78,18 @@ extern void __chk_io_ptr(const volatile void __iomem *);
2439 + #include <linux/compiler-clang.h>
2440 + #endif
2441 +
2442 ++/*
2443 ++ * Some architectures need to provide custom definitions of macros provided
2444 ++ * by linux/compiler-*.h, and can do so using asm/compiler.h. We include that
2445 ++ * conditionally rather than using an asm-generic wrapper in order to avoid
2446 ++ * build failures if any C compilation, which will include this file via an
2447 ++ * -include argument in c_flags, occurs prior to the asm-generic wrappers being
2448 ++ * generated.
2449 ++ */
2450 ++#ifdef CONFIG_HAVE_ARCH_COMPILER_H
2451 ++#include <asm/compiler.h>
2452 ++#endif
2453 ++
2454 + /*
2455 + * Generic compiler-dependent macros required for kernel
2456 + * build go below this comment. Actual compiler/compiler version
2457 +diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
2458 +index 5382b5183b7e..82a953ec5ef0 100644
2459 +--- a/include/linux/gpio/driver.h
2460 ++++ b/include/linux/gpio/driver.h
2461 +@@ -94,6 +94,13 @@ struct gpio_irq_chip {
2462 + */
2463 + unsigned int num_parents;
2464 +
2465 ++ /**
2466 ++ * @parent_irq:
2467 ++ *
2468 ++ * For use by gpiochip_set_cascaded_irqchip()
2469 ++ */
2470 ++ unsigned int parent_irq;
2471 ++
2472 + /**
2473 + * @parents:
2474 + *
2475 +diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
2476 +index 64f450593b54..b49bfc8e68b0 100644
2477 +--- a/include/linux/mlx5/driver.h
2478 ++++ b/include/linux/mlx5/driver.h
2479 +@@ -1022,6 +1022,14 @@ static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
2480 + ((fbc->frag_sz_m1 & ix) << fbc->log_stride);
2481 + }
2482 +
2483 ++static inline u32
2484 ++mlx5_frag_buf_get_idx_last_contig_stride(struct mlx5_frag_buf_ctrl *fbc, u32 ix)
2485 ++{
2486 ++ u32 last_frag_stride_idx = (ix + fbc->strides_offset) | fbc->frag_sz_m1;
2487 ++
2488 ++ return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1);
2489 ++}
2490 ++
2491 + int mlx5_cmd_init(struct mlx5_core_dev *dev);
2492 + void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
2493 + void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
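
The new helper is a pure bit trick: ORing the offset-adjusted stride index
with frag_sz_m1 saturates the low bits, yielding the last stride index that
still lives in the same fragment page, clamped to the end of the buffer.
Worked example: with frag_sz_m1 = 7 (eight strides per fragment),
strides_offset = 0 and sz_m1 = 31, ix = 5 gives (5 | 7) = 7, so
min(7, 31) = 7 and mlx5_wq_cyc_get_contig_wqebbs() above returns
7 - 5 + 1 = 3 contiguous WQEBBs before the boundary; ix = 8 starts a fresh
fragment and gets the full 8.
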
2494 +diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
2495 +index dd2052f0efb7..11b7b8ab0696 100644
2496 +--- a/include/linux/netfilter.h
2497 ++++ b/include/linux/netfilter.h
2498 +@@ -215,6 +215,8 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
2499 + break;
2500 + case NFPROTO_ARP:
2501 + #ifdef CONFIG_NETFILTER_FAMILY_ARP
2502 ++ if (WARN_ON_ONCE(hook >= ARRAY_SIZE(net->nf.hooks_arp)))
2503 ++ break;
2504 + hook_head = rcu_dereference(net->nf.hooks_arp[hook]);
2505 + #endif
2506 + break;
2507 +diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
2508 +index 3d4930528db0..2d31e22babd8 100644
2509 +--- a/include/net/ip6_fib.h
2510 ++++ b/include/net/ip6_fib.h
2511 +@@ -159,6 +159,10 @@ struct fib6_info {
2512 + struct rt6_info * __percpu *rt6i_pcpu;
2513 + struct rt6_exception_bucket __rcu *rt6i_exception_bucket;
2514 +
2515 ++#ifdef CONFIG_IPV6_ROUTER_PREF
2516 ++ unsigned long last_probe;
2517 ++#endif
2518 ++
2519 + u32 fib6_metric;
2520 + u8 fib6_protocol;
2521 + u8 fib6_type;
2522 +diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
2523 +index 5ef1bad81ef5..9e3d32746430 100644
2524 +--- a/include/net/sctp/sm.h
2525 ++++ b/include/net/sctp/sm.h
2526 +@@ -347,7 +347,7 @@ static inline __u16 sctp_data_size(struct sctp_chunk *chunk)
2527 + __u16 size;
2528 +
2529 + size = ntohs(chunk->chunk_hdr->length);
2530 +- size -= sctp_datahdr_len(&chunk->asoc->stream);
2531 ++ size -= sctp_datachk_len(&chunk->asoc->stream);
2532 +
2533 + return size;
2534 + }
2535 +diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
2536 +index 4fff00e9da8a..0a774b64fc29 100644
2537 +--- a/include/trace/events/rxrpc.h
2538 ++++ b/include/trace/events/rxrpc.h
2539 +@@ -56,7 +56,6 @@ enum rxrpc_peer_trace {
2540 + rxrpc_peer_new,
2541 + rxrpc_peer_processing,
2542 + rxrpc_peer_put,
2543 +- rxrpc_peer_queued_error,
2544 + };
2545 +
2546 + enum rxrpc_conn_trace {
2547 +@@ -257,8 +256,7 @@ enum rxrpc_tx_fail_trace {
2548 + EM(rxrpc_peer_got, "GOT") \
2549 + EM(rxrpc_peer_new, "NEW") \
2550 + EM(rxrpc_peer_processing, "PRO") \
2551 +- EM(rxrpc_peer_put, "PUT") \
2552 +- E_(rxrpc_peer_queued_error, "QER")
2553 ++ E_(rxrpc_peer_put, "PUT")
2554 +
2555 + #define rxrpc_conn_traces \
2556 + EM(rxrpc_conn_got, "GOT") \
2557 +diff --git a/kernel/events/core.c b/kernel/events/core.c
2558 +index ae22d93701db..fc072b7f839d 100644
2559 +--- a/kernel/events/core.c
2560 ++++ b/kernel/events/core.c
2561 +@@ -8319,6 +8319,8 @@ void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
2562 + goto unlock;
2563 +
2564 + list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
2565 ++ if (event->cpu != smp_processor_id())
2566 ++ continue;
2567 + if (event->attr.type != PERF_TYPE_TRACEPOINT)
2568 + continue;
2569 + if (event->attr.config != entry->type)
2570 +@@ -9436,9 +9438,7 @@ static void free_pmu_context(struct pmu *pmu)
2571 + if (pmu->task_ctx_nr > perf_invalid_context)
2572 + return;
2573 +
2574 +- mutex_lock(&pmus_lock);
2575 + free_percpu(pmu->pmu_cpu_context);
2576 +- mutex_unlock(&pmus_lock);
2577 + }
2578 +
2579 + /*
2580 +@@ -9694,12 +9694,8 @@ EXPORT_SYMBOL_GPL(perf_pmu_register);
2581 +
2582 + void perf_pmu_unregister(struct pmu *pmu)
2583 + {
2584 +- int remove_device;
2585 +-
2586 + mutex_lock(&pmus_lock);
2587 +- remove_device = pmu_bus_running;
2588 + list_del_rcu(&pmu->entry);
2589 +- mutex_unlock(&pmus_lock);
2590 +
2591 + /*
2592 + * We dereference the pmu list under both SRCU and regular RCU, so
2593 +@@ -9711,13 +9707,14 @@ void perf_pmu_unregister(struct pmu *pmu)
2594 + free_percpu(pmu->pmu_disable_count);
2595 + if (pmu->type >= PERF_TYPE_MAX)
2596 + idr_remove(&pmu_idr, pmu->type);
2597 +- if (remove_device) {
2598 ++ if (pmu_bus_running) {
2599 + if (pmu->nr_addr_filters)
2600 + device_remove_file(pmu->dev, &dev_attr_nr_addr_filters);
2601 + device_del(pmu->dev);
2602 + put_device(pmu->dev);
2603 + }
2604 + free_pmu_context(pmu);
2605 ++ mutex_unlock(&pmus_lock);
2606 + }
2607 + EXPORT_SYMBOL_GPL(perf_pmu_unregister);
2608 +
2609 +diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c
2610 +index 0e4cd64ad2c0..654977862b06 100644
2611 +--- a/kernel/locking/test-ww_mutex.c
2612 ++++ b/kernel/locking/test-ww_mutex.c
2613 +@@ -260,7 +260,7 @@ static void test_cycle_work(struct work_struct *work)
2614 + {
2615 + struct test_cycle *cycle = container_of(work, typeof(*cycle), work);
2616 + struct ww_acquire_ctx ctx;
2617 +- int err;
2618 ++ int err, erra = 0;
2619 +
2620 + ww_acquire_init(&ctx, &ww_class);
2621 + ww_mutex_lock(&cycle->a_mutex, &ctx);
2622 +@@ -270,17 +270,19 @@ static void test_cycle_work(struct work_struct *work)
2623 +
2624 + err = ww_mutex_lock(cycle->b_mutex, &ctx);
2625 + if (err == -EDEADLK) {
2626 ++ err = 0;
2627 + ww_mutex_unlock(&cycle->a_mutex);
2628 + ww_mutex_lock_slow(cycle->b_mutex, &ctx);
2629 +- err = ww_mutex_lock(&cycle->a_mutex, &ctx);
2630 ++ erra = ww_mutex_lock(&cycle->a_mutex, &ctx);
2631 + }
2632 +
2633 + if (!err)
2634 + ww_mutex_unlock(cycle->b_mutex);
2635 +- ww_mutex_unlock(&cycle->a_mutex);
2636 ++ if (!erra)
2637 ++ ww_mutex_unlock(&cycle->a_mutex);
2638 + ww_acquire_fini(&ctx);
2639 +
2640 +- cycle->result = err;
2641 ++ cycle->result = err ?: erra;
2642 + }
2643 +
2644 + static int __test_cycle(unsigned int nthreads)
2645 +diff --git a/mm/gup_benchmark.c b/mm/gup_benchmark.c
2646 +index 6a473709e9b6..7405c9d89d65 100644
2647 +--- a/mm/gup_benchmark.c
2648 ++++ b/mm/gup_benchmark.c
2649 +@@ -19,7 +19,8 @@ static int __gup_benchmark_ioctl(unsigned int cmd,
2650 + struct gup_benchmark *gup)
2651 + {
2652 + ktime_t start_time, end_time;
2653 +- unsigned long i, nr, nr_pages, addr, next;
2654 ++ unsigned long i, nr_pages, addr, next;
2655 ++ int nr;
2656 + struct page **pages;
2657 +
2658 + nr_pages = gup->size / PAGE_SIZE;
2659 +diff --git a/mm/migrate.c b/mm/migrate.c
2660 +index 2a55289ee9f1..f49eb9589d73 100644
2661 +--- a/mm/migrate.c
2662 ++++ b/mm/migrate.c
2663 +@@ -1415,7 +1415,7 @@ retry:
2664 + * we encounter them after the rest of the list
2665 + * is processed.
2666 + */
2667 +- if (PageTransHuge(page)) {
2668 ++ if (PageTransHuge(page) && !PageHuge(page)) {
2669 + lock_page(page);
2670 + rc = split_huge_page_to_list(page, from);
2671 + unlock_page(page);
2672 +diff --git a/mm/vmscan.c b/mm/vmscan.c
2673 +index fc0436407471..03822f86f288 100644
2674 +--- a/mm/vmscan.c
2675 ++++ b/mm/vmscan.c
2676 +@@ -386,17 +386,6 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
2677 + delta = freeable >> priority;
2678 + delta *= 4;
2679 + do_div(delta, shrinker->seeks);
2680 +-
2681 +- /*
2682 +- * Make sure we apply some minimal pressure on default priority
2683 +- * even on small cgroups. Stale objects are not only consuming memory
2684 +- * by themselves, but can also hold a reference to a dying cgroup,
2685 +- * preventing it from being reclaimed. A dying cgroup with all
2686 +- * corresponding structures like per-cpu stats and kmem caches
2687 +- * can be really big, so it may lead to a significant waste of memory.
2688 +- */
2689 +- delta = max_t(unsigned long long, delta, min(freeable, batch_size));
2690 +-
2691 + total_scan += delta;
2692 + if (total_scan < 0) {
2693 + pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",
2694 +diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
2695 +index 8a80d48d89c4..1b9984f653dd 100644
2696 +--- a/net/bluetooth/mgmt.c
2697 ++++ b/net/bluetooth/mgmt.c
2698 +@@ -2298,9 +2298,8 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2699 + /* LE address type */
2700 + addr_type = le_addr_type(cp->addr.type);
2701 +
2702 +- hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2703 +-
2704 +- err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
2705 ++ /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
2706 ++ err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
2707 + if (err < 0) {
2708 + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2709 + MGMT_STATUS_NOT_PAIRED, &rp,
2710 +@@ -2314,8 +2313,6 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2711 + goto done;
2712 + }
2713 +
2714 +- /* Abort any ongoing SMP pairing */
2715 +- smp_cancel_pairing(conn);
2716 +
2717 + /* Defer clearing up the connection parameters until closing to
2718 + * give a chance of keeping them if a repairing happens.
2719 +diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
2720 +index 3a7b0773536b..73f7211d0431 100644
2721 +--- a/net/bluetooth/smp.c
2722 ++++ b/net/bluetooth/smp.c
2723 +@@ -2422,30 +2422,51 @@ unlock:
2724 + return ret;
2725 + }
2726 +
2727 +-void smp_cancel_pairing(struct hci_conn *hcon)
2728 ++int smp_cancel_and_remove_pairing(struct hci_dev *hdev, bdaddr_t *bdaddr,
2729 ++ u8 addr_type)
2730 + {
2731 +- struct l2cap_conn *conn = hcon->l2cap_data;
2732 ++ struct hci_conn *hcon;
2733 ++ struct l2cap_conn *conn;
2734 + struct l2cap_chan *chan;
2735 + struct smp_chan *smp;
2736 ++ int err;
2737 ++
2738 ++ err = hci_remove_ltk(hdev, bdaddr, addr_type);
2739 ++ hci_remove_irk(hdev, bdaddr, addr_type);
2740 ++
2741 ++ hcon = hci_conn_hash_lookup_le(hdev, bdaddr, addr_type);
2742 ++ if (!hcon)
2743 ++ goto done;
2744 +
2745 ++ conn = hcon->l2cap_data;
2746 + if (!conn)
2747 +- return;
2748 ++ goto done;
2749 +
2750 + chan = conn->smp;
2751 + if (!chan)
2752 +- return;
2753 ++ goto done;
2754 +
2755 + l2cap_chan_lock(chan);
2756 +
2757 + smp = chan->data;
2758 + if (smp) {
2759 ++ /* Set keys to NULL to make sure smp_failure() does not try to
2760 ++ * remove and free already invalidated rcu list entries. */
2761 ++ smp->ltk = NULL;
2762 ++ smp->slave_ltk = NULL;
2763 ++ smp->remote_irk = NULL;
2764 ++
2765 + if (test_bit(SMP_FLAG_COMPLETE, &smp->flags))
2766 + smp_failure(conn, 0);
2767 + else
2768 + smp_failure(conn, SMP_UNSPECIFIED);
2769 ++ err = 0;
2770 + }
2771 +
2772 + l2cap_chan_unlock(chan);
2773 ++
2774 ++done:
2775 ++ return err;
2776 + }
2777 +
2778 + static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb)
2779 +diff --git a/net/bluetooth/smp.h b/net/bluetooth/smp.h
2780 +index 0ff6247eaa6c..121edadd5f8d 100644
2781 +--- a/net/bluetooth/smp.h
2782 ++++ b/net/bluetooth/smp.h
2783 +@@ -181,7 +181,8 @@ enum smp_key_pref {
2784 + };
2785 +
2786 + /* SMP Commands */
2787 +-void smp_cancel_pairing(struct hci_conn *hcon);
2788 ++int smp_cancel_and_remove_pairing(struct hci_dev *hdev, bdaddr_t *bdaddr,
2789 ++ u8 addr_type);
2790 + bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level,
2791 + enum smp_key_pref key_pref);
2792 + int smp_conn_security(struct hci_conn *hcon, __u8 sec_level);
2793 +diff --git a/net/bpfilter/bpfilter_kern.c b/net/bpfilter/bpfilter_kern.c
2794 +index f0fc182d3db7..d5dd6b8b4248 100644
2795 +--- a/net/bpfilter/bpfilter_kern.c
2796 ++++ b/net/bpfilter/bpfilter_kern.c
2797 +@@ -23,9 +23,11 @@ static void shutdown_umh(struct umh_info *info)
2798 +
2799 + if (!info->pid)
2800 + return;
2801 +- tsk = pid_task(find_vpid(info->pid), PIDTYPE_PID);
2802 +- if (tsk)
2803 ++ tsk = get_pid_task(find_vpid(info->pid), PIDTYPE_PID);
2804 ++ if (tsk) {
2805 + force_sig(SIGKILL, tsk);
2806 ++ put_task_struct(tsk);
2807 ++ }
2808 + fput(info->pipe_to_umh);
2809 + fput(info->pipe_from_umh);
2810 + info->pid = 0;
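
pid_task() returns the task_struct without taking a reference, so nothing
stopped the task from being freed between the lookup and force_sig();
get_pid_task() pins it and the caller drops the pin once the signal has been
sent:

	tsk = get_pid_task(find_vpid(info->pid), PIDTYPE_PID);
	if (tsk) {
		force_sig(SIGKILL, tsk);	/* tsk cannot go away here */
		put_task_struct(tsk);		/* drop the reference we took */
	}
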
2811 +diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
2812 +index 920665dd92db..6059a47f5e0c 100644
2813 +--- a/net/bridge/br_multicast.c
2814 ++++ b/net/bridge/br_multicast.c
2815 +@@ -1420,7 +1420,14 @@ static void br_multicast_query_received(struct net_bridge *br,
2816 + return;
2817 +
2818 + br_multicast_update_query_timer(br, query, max_delay);
2819 +- br_multicast_mark_router(br, port);
2820 ++
2821 ++ /* Based on RFC4541, section 2.1.1 IGMP Forwarding Rules,
2822 ++ * the arrival port for IGMP Queries where the source address
2823 ++ * is 0.0.0.0 should not be added to router port list.
2824 ++ */
2825 ++ if ((saddr->proto == htons(ETH_P_IP) && saddr->u.ip4) ||
2826 ++ saddr->proto == htons(ETH_P_IPV6))
2827 ++ br_multicast_mark_router(br, port);
2828 + }
2829 +
2830 + static int br_ip4_multicast_query(struct net_bridge *br,
2831 +diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
2832 +index 9b16eaf33819..58240cc185e7 100644
2833 +--- a/net/bridge/br_netfilter_hooks.c
2834 ++++ b/net/bridge/br_netfilter_hooks.c
2835 +@@ -834,7 +834,8 @@ static unsigned int ip_sabotage_in(void *priv,
2836 + struct sk_buff *skb,
2837 + const struct nf_hook_state *state)
2838 + {
2839 +- if (skb->nf_bridge && !skb->nf_bridge->in_prerouting) {
2840 ++ if (skb->nf_bridge && !skb->nf_bridge->in_prerouting &&
2841 ++ !netif_is_l3_master(skb->dev)) {
2842 + state->okfn(state->net, state->sk, skb);
2843 + return NF_STOLEN;
2844 + }
2845 +diff --git a/net/core/datagram.c b/net/core/datagram.c
2846 +index 9938952c5c78..16f0eb0970c4 100644
2847 +--- a/net/core/datagram.c
2848 ++++ b/net/core/datagram.c
2849 +@@ -808,8 +808,9 @@ int skb_copy_and_csum_datagram_msg(struct sk_buff *skb,
2850 + return -EINVAL;
2851 + }
2852 +
2853 +- if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
2854 +- netdev_rx_csum_fault(skb->dev);
2855 ++ if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
2856 ++ !skb->csum_complete_sw)
2857 ++ netdev_rx_csum_fault(NULL);
2858 + }
2859 + return 0;
2860 + fault:
2861 +diff --git a/net/core/ethtool.c b/net/core/ethtool.c
2862 +index 6c04f1bf377d..548d0e615bc7 100644
2863 +--- a/net/core/ethtool.c
2864 ++++ b/net/core/ethtool.c
2865 +@@ -2461,13 +2461,17 @@ roll_back:
2866 + return ret;
2867 + }
2868 +
2869 +-static int ethtool_set_per_queue(struct net_device *dev, void __user *useraddr)
2870 ++static int ethtool_set_per_queue(struct net_device *dev,
2871 ++ void __user *useraddr, u32 sub_cmd)
2872 + {
2873 + struct ethtool_per_queue_op per_queue_opt;
2874 +
2875 + if (copy_from_user(&per_queue_opt, useraddr, sizeof(per_queue_opt)))
2876 + return -EFAULT;
2877 +
2878 ++ if (per_queue_opt.sub_command != sub_cmd)
2879 ++ return -EINVAL;
2880 ++
2881 + switch (per_queue_opt.sub_command) {
2882 + case ETHTOOL_GCOALESCE:
2883 + return ethtool_get_per_queue_coalesce(dev, useraddr, &per_queue_opt);
2884 +@@ -2838,7 +2842,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
2885 + rc = ethtool_get_phy_stats(dev, useraddr);
2886 + break;
2887 + case ETHTOOL_PERQUEUE:
2888 +- rc = ethtool_set_per_queue(dev, useraddr);
2889 ++ rc = ethtool_set_per_queue(dev, useraddr, sub_cmd);
2890 + break;
2891 + case ETHTOOL_GLINKSETTINGS:
2892 + rc = ethtool_get_link_ksettings(dev, useraddr);
2893 +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
2894 +index 18de39dbdc30..4b25fd14bc5a 100644
2895 +--- a/net/core/rtnetlink.c
2896 ++++ b/net/core/rtnetlink.c
2897 +@@ -3480,6 +3480,11 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
2898 + return -EINVAL;
2899 + }
2900 +
2901 ++ if (dev->type != ARPHRD_ETHER) {
2902 ++ NL_SET_ERR_MSG(extack, "FDB delete only supported for Ethernet devices");
2903 ++ return -EINVAL;
2904 ++ }
2905 ++
2906 + addr = nla_data(tb[NDA_LLADDR]);
2907 +
2908 + err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
2909 +@@ -3584,6 +3589,11 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
2910 + return -EINVAL;
2911 + }
2912 +
2913 ++ if (dev->type != ARPHRD_ETHER) {
2914 ++ NL_SET_ERR_MSG(extack, "FDB add only supported for Ethernet devices");
2915 ++ return -EINVAL;
2916 ++ }
2917 ++
2918 + addr = nla_data(tb[NDA_LLADDR]);
2919 +
2920 + err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
2921 +diff --git a/net/core/skbuff.c b/net/core/skbuff.c
2922 +index 3680912f056a..c45916b91a9c 100644
2923 +--- a/net/core/skbuff.c
2924 ++++ b/net/core/skbuff.c
2925 +@@ -1845,8 +1845,9 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
2926 + if (skb->ip_summed == CHECKSUM_COMPLETE) {
2927 + int delta = skb->len - len;
2928 +
2929 +- skb->csum = csum_sub(skb->csum,
2930 +- skb_checksum(skb, len, delta, 0));
2931 ++ skb->csum = csum_block_sub(skb->csum,
2932 ++ skb_checksum(skb, len, delta, 0),
2933 ++ len);
2934 + }
2935 + return __pskb_trim(skb, len);
2936 + }
2937 +diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
2938 +index d14d741fb05e..9d3bdce1ad8a 100644
2939 +--- a/net/ipv4/ip_fragment.c
2940 ++++ b/net/ipv4/ip_fragment.c
2941 +@@ -657,10 +657,14 @@ struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
2942 + if (ip_is_fragment(&iph)) {
2943 + skb = skb_share_check(skb, GFP_ATOMIC);
2944 + if (skb) {
2945 +- if (!pskb_may_pull(skb, netoff + iph.ihl * 4))
2946 +- return skb;
2947 +- if (pskb_trim_rcsum(skb, netoff + len))
2948 +- return skb;
2949 ++ if (!pskb_may_pull(skb, netoff + iph.ihl * 4)) {
2950 ++ kfree_skb(skb);
2951 ++ return NULL;
2952 ++ }
2953 ++ if (pskb_trim_rcsum(skb, netoff + len)) {
2954 ++ kfree_skb(skb);
2955 ++ return NULL;
2956 ++ }
2957 + memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
2958 + if (ip_defrag(net, skb, user))
2959 + return NULL;
2960 +diff --git a/net/ipv4/ipmr_base.c b/net/ipv4/ipmr_base.c
2961 +index cafb0506c8c9..33be09791c74 100644
2962 +--- a/net/ipv4/ipmr_base.c
2963 ++++ b/net/ipv4/ipmr_base.c
2964 +@@ -295,8 +295,6 @@ int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
2965 + next_entry:
2966 + e++;
2967 + }
2968 +- e = 0;
2969 +- s_e = 0;
2970 +
2971 + spin_lock_bh(lock);
2972 + list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
2973 +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
2974 +index a12df801de94..2fe7e2713350 100644
2975 +--- a/net/ipv4/udp.c
2976 ++++ b/net/ipv4/udp.c
2977 +@@ -2124,8 +2124,24 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
2978 + /* Note, we are only interested in != 0 or == 0, thus the
2979 + * force to int.
2980 + */
2981 +- return (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
2982 +- inet_compute_pseudo);
2983 ++ err = (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
2984 ++ inet_compute_pseudo);
2985 ++ if (err)
2986 ++ return err;
2987 ++
2988 ++ if (skb->ip_summed == CHECKSUM_COMPLETE && !skb->csum_valid) {
2989 ++ /* If SW calculated the value, we know it's bad */
2990 ++ if (skb->csum_complete_sw)
2991 ++ return 1;
2992 ++
2993 ++ /* HW says the value is bad. Let's validate that.
2994 ++ * skb->csum is no longer the full packet checksum,
2995 ++ * so don't treat it as such.
2996 ++ */
2997 ++ skb_checksum_complete_unset(skb);
2998 ++ }
2999 ++
3000 ++ return 0;
3001 + }
3002 +
3003 + /* wrapper for udp_queue_rcv_skb taking care of csum conversion and
3004 +diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
3005 +index bcfc00e88756..f8de2482a529 100644
3006 +--- a/net/ipv4/xfrm4_input.c
3007 ++++ b/net/ipv4/xfrm4_input.c
3008 +@@ -67,6 +67,7 @@ int xfrm4_transport_finish(struct sk_buff *skb, int async)
3009 +
3010 + if (xo && (xo->flags & XFRM_GRO)) {
3011 + skb_mac_header_rebuild(skb);
3012 ++ skb_reset_transport_header(skb);
3013 + return 0;
3014 + }
3015 +
3016 +diff --git a/net/ipv4/xfrm4_mode_transport.c b/net/ipv4/xfrm4_mode_transport.c
3017 +index 3d36644890bb..1ad2c2c4e250 100644
3018 +--- a/net/ipv4/xfrm4_mode_transport.c
3019 ++++ b/net/ipv4/xfrm4_mode_transport.c
3020 +@@ -46,7 +46,6 @@ static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb)
3021 + static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
3022 + {
3023 + int ihl = skb->data - skb_transport_header(skb);
3024 +- struct xfrm_offload *xo = xfrm_offload(skb);
3025 +
3026 + if (skb->transport_header != skb->network_header) {
3027 + memmove(skb_transport_header(skb),
3028 +@@ -54,8 +53,7 @@ static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
3029 + skb->network_header = skb->transport_header;
3030 + }
3031 + ip_hdr(skb)->tot_len = htons(skb->len + ihl);
3032 +- if (!xo || !(xo->flags & XFRM_GRO))
3033 +- skb_reset_transport_header(skb);
3034 ++ skb_reset_transport_header(skb);
3035 + return 0;
3036 + }
3037 +
3038 +diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
3039 +index 3484c7020fd9..ac3de1aa1cd3 100644
3040 +--- a/net/ipv6/addrconf.c
3041 ++++ b/net/ipv6/addrconf.c
3042 +@@ -4930,8 +4930,8 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
3043 +
3044 + /* unicast address incl. temp addr */
3045 + list_for_each_entry(ifa, &idev->addr_list, if_list) {
3046 +- if (++ip_idx < s_ip_idx)
3047 +- continue;
3048 ++ if (ip_idx < s_ip_idx)
3049 ++ goto next;
3050 + err = inet6_fill_ifaddr(skb, ifa,
3051 + NETLINK_CB(cb->skb).portid,
3052 + cb->nlh->nlmsg_seq,
3053 +@@ -4940,6 +4940,8 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
3054 + if (err < 0)
3055 + break;
3056 + nl_dump_check_consistent(cb, nlmsg_hdr(skb));
3057 ++next:
3058 ++ ip_idx++;
3059 + }
3060 + break;
3061 + }
3062 +diff --git a/net/ipv6/ip6_checksum.c b/net/ipv6/ip6_checksum.c
3063 +index 547515e8450a..377717045f8f 100644
3064 +--- a/net/ipv6/ip6_checksum.c
3065 ++++ b/net/ipv6/ip6_checksum.c
3066 +@@ -88,8 +88,24 @@ int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto)
3067 + * Note, we are only interested in != 0 or == 0, thus the
3068 + * force to int.
3069 + */
3070 +- return (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
3071 +- ip6_compute_pseudo);
3072 ++ err = (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
3073 ++ ip6_compute_pseudo);
3074 ++ if (err)
3075 ++ return err;
3076 ++
3077 ++ if (skb->ip_summed == CHECKSUM_COMPLETE && !skb->csum_valid) {
3078 ++ /* If SW calculated the value, we know it's bad */
3079 ++ if (skb->csum_complete_sw)
3080 ++ return 1;
3081 ++
3082 ++ /* HW says the value is bad. Let's validate that.
3083 ++ * skb->csum is no longer the full packet checksum,
3084 ++ * so don't treat it as such.
3085 ++ */
3086 ++ skb_checksum_complete_unset(skb);
3087 ++ }
3088 ++
3089 ++ return 0;
3090 + }
3091 + EXPORT_SYMBOL(udp6_csum_init);
3092 +
3093 +diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
3094 +index f5b5b0574a2d..009b508127e6 100644
3095 +--- a/net/ipv6/ip6_tunnel.c
3096 ++++ b/net/ipv6/ip6_tunnel.c
3097 +@@ -1184,10 +1184,6 @@ route_lookup:
3098 + }
3099 + skb_dst_set(skb, dst);
3100 +
3101 +- if (encap_limit >= 0) {
3102 +- init_tel_txopt(&opt, encap_limit);
3103 +- ipv6_push_frag_opts(skb, &opt.ops, &proto);
3104 +- }
3105 + hop_limit = hop_limit ? : ip6_dst_hoplimit(dst);
3106 +
3107 + /* Calculate max headroom for all the headers and adjust
3108 +@@ -1202,6 +1198,11 @@ route_lookup:
3109 + if (err)
3110 + return err;
3111 +
3112 ++ if (encap_limit >= 0) {
3113 ++ init_tel_txopt(&opt, encap_limit);
3114 ++ ipv6_push_frag_opts(skb, &opt.ops, &proto);
3115 ++ }
3116 ++
3117 + skb_push(skb, sizeof(struct ipv6hdr));
3118 + skb_reset_network_header(skb);
3119 + ipv6h = ipv6_hdr(skb);
3120 +diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
3121 +index f60f310785fd..131440ea6b51 100644
3122 +--- a/net/ipv6/mcast.c
3123 ++++ b/net/ipv6/mcast.c
3124 +@@ -2436,17 +2436,17 @@ static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
3125 + {
3126 + int err;
3127 +
3128 +- /* callers have the socket lock and rtnl lock
3129 +- * so no other readers or writers of iml or its sflist
3130 +- */
3131 ++ write_lock_bh(&iml->sflock);
3132 + if (!iml->sflist) {
3133 + /* any-source empty exclude case */
3134 +- return ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0);
3135 ++ err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0);
3136 ++ } else {
3137 ++ err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode,
3138 ++ iml->sflist->sl_count, iml->sflist->sl_addr, 0);
3139 ++ sock_kfree_s(sk, iml->sflist, IP6_SFLSIZE(iml->sflist->sl_max));
3140 ++ iml->sflist = NULL;
3141 + }
3142 +- err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode,
3143 +- iml->sflist->sl_count, iml->sflist->sl_addr, 0);
3144 +- sock_kfree_s(sk, iml->sflist, IP6_SFLSIZE(iml->sflist->sl_max));
3145 +- iml->sflist = NULL;
3146 ++ write_unlock_bh(&iml->sflock);
3147 + return err;
3148 + }
3149 +
3150 +diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
3151 +index 0ec273997d1d..673a4a932f2a 100644
3152 +--- a/net/ipv6/ndisc.c
3153 ++++ b/net/ipv6/ndisc.c
3154 +@@ -1732,10 +1732,9 @@ int ndisc_rcv(struct sk_buff *skb)
3155 + return 0;
3156 + }
3157 +
3158 +- memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));
3159 +-
3160 + switch (msg->icmph.icmp6_type) {
3161 + case NDISC_NEIGHBOUR_SOLICITATION:
3162 ++ memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));
3163 + ndisc_recv_ns(skb);
3164 + break;
3165 +
3166 +diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
3167 +index e4d9e6976d3c..a452d99c9f52 100644
3168 +--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
3169 ++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
3170 +@@ -585,8 +585,6 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
3171 + fq->q.meat == fq->q.len &&
3172 + nf_ct_frag6_reasm(fq, skb, dev))
3173 + ret = 0;
3174 +- else
3175 +- skb_dst_drop(skb);
3176 +
3177 + out_unlock:
3178 + spin_unlock_bh(&fq->q.lock);
3179 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
3180 +index ed526e257da6..a243d5249b51 100644
3181 +--- a/net/ipv6/route.c
3182 ++++ b/net/ipv6/route.c
3183 +@@ -517,10 +517,11 @@ static void rt6_probe_deferred(struct work_struct *w)
3184 +
3185 + static void rt6_probe(struct fib6_info *rt)
3186 + {
3187 +- struct __rt6_probe_work *work;
3188 ++ struct __rt6_probe_work *work = NULL;
3189 + const struct in6_addr *nh_gw;
3190 + struct neighbour *neigh;
3191 + struct net_device *dev;
3192 ++ struct inet6_dev *idev;
3193 +
3194 + /*
3195 + * Okay, this does not seem to be appropriate
3196 +@@ -536,15 +537,12 @@ static void rt6_probe(struct fib6_info *rt)
3197 + nh_gw = &rt->fib6_nh.nh_gw;
3198 + dev = rt->fib6_nh.nh_dev;
3199 + rcu_read_lock_bh();
3200 ++ idev = __in6_dev_get(dev);
3201 + neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);
3202 + if (neigh) {
3203 +- struct inet6_dev *idev;
3204 +-
3205 + if (neigh->nud_state & NUD_VALID)
3206 + goto out;
3207 +
3208 +- idev = __in6_dev_get(dev);
3209 +- work = NULL;
3210 + write_lock(&neigh->lock);
3211 + if (!(neigh->nud_state & NUD_VALID) &&
3212 + time_after(jiffies,
3213 +@@ -554,11 +552,13 @@ static void rt6_probe(struct fib6_info *rt)
3214 + __neigh_set_probe_once(neigh);
3215 + }
3216 + write_unlock(&neigh->lock);
3217 +- } else {
3218 ++ } else if (time_after(jiffies, rt->last_probe +
3219 ++ idev->cnf.rtr_probe_interval)) {
3220 + work = kmalloc(sizeof(*work), GFP_ATOMIC);
3221 + }
3222 +
3223 + if (work) {
3224 ++ rt->last_probe = jiffies;
3225 + INIT_WORK(&work->work, rt6_probe_deferred);
3226 + work->target = *nh_gw;
3227 + dev_hold(dev);
3228 +@@ -2792,6 +2792,8 @@ static int ip6_route_check_nh_onlink(struct net *net,
3229 + grt = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0);
3230 + if (grt) {
3231 + if (!grt->dst.error &&
3232 ++ /* ignore match if it is the default route */
3233 ++ grt->from && !ipv6_addr_any(&grt->from->fib6_dst.addr) &&
3234 + (grt->rt6i_flags & flags || dev != grt->dst.dev)) {
3235 + NL_SET_ERR_MSG(extack,
3236 + "Nexthop has invalid gateway or device mismatch");
3237 +diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
3238 +index 39d0cab919bb..4f2c7a196365 100644
3239 +--- a/net/ipv6/udp.c
3240 ++++ b/net/ipv6/udp.c
3241 +@@ -762,11 +762,9 @@ static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
3242 +
3243 + ret = udpv6_queue_rcv_skb(sk, skb);
3244 +
3245 +- /* a return value > 0 means to resubmit the input, but
3246 +- * it wants the return to be -protocol, or 0
3247 +- */
3248 ++ /* a return value > 0 means to resubmit the input */
3249 + if (ret > 0)
3250 +- return -ret;
3251 ++ return ret;
3252 + return 0;
3253 + }
3254 +
3255 +diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
3256 +index 841f4a07438e..9ef490dddcea 100644
3257 +--- a/net/ipv6/xfrm6_input.c
3258 ++++ b/net/ipv6/xfrm6_input.c
3259 +@@ -59,6 +59,7 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async)
3260 +
3261 + if (xo && (xo->flags & XFRM_GRO)) {
3262 + skb_mac_header_rebuild(skb);
3263 ++ skb_reset_transport_header(skb);
3264 + return -1;
3265 + }
3266 +
3267 +diff --git a/net/ipv6/xfrm6_mode_transport.c b/net/ipv6/xfrm6_mode_transport.c
3268 +index 9ad07a91708e..3c29da5defe6 100644
3269 +--- a/net/ipv6/xfrm6_mode_transport.c
3270 ++++ b/net/ipv6/xfrm6_mode_transport.c
3271 +@@ -51,7 +51,6 @@ static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb)
3272 + static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
3273 + {
3274 + int ihl = skb->data - skb_transport_header(skb);
3275 +- struct xfrm_offload *xo = xfrm_offload(skb);
3276 +
3277 + if (skb->transport_header != skb->network_header) {
3278 + memmove(skb_transport_header(skb),
3279 +@@ -60,8 +59,7 @@ static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
3280 + }
3281 + ipv6_hdr(skb)->payload_len = htons(skb->len + ihl -
3282 + sizeof(struct ipv6hdr));
3283 +- if (!xo || !(xo->flags & XFRM_GRO))
3284 +- skb_reset_transport_header(skb);
3285 ++ skb_reset_transport_header(skb);
3286 + return 0;
3287 + }
3288 +
3289 +diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
3290 +index 5959ce9620eb..6a74080005cf 100644
3291 +--- a/net/ipv6/xfrm6_output.c
3292 ++++ b/net/ipv6/xfrm6_output.c
3293 +@@ -170,9 +170,11 @@ static int __xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
3294 +
3295 + if (toobig && xfrm6_local_dontfrag(skb)) {
3296 + xfrm6_local_rxpmtu(skb, mtu);
3297 ++ kfree_skb(skb);
3298 + return -EMSGSIZE;
3299 + } else if (!skb->ignore_df && toobig && skb->sk) {
3300 + xfrm_local_error(skb, mtu);
3301 ++ kfree_skb(skb);
3302 + return -EMSGSIZE;
3303 + }
3304 +
3305 +diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
3306 +index c0ac522b48a1..4ff89cb7c86f 100644
3307 +--- a/net/llc/llc_conn.c
3308 ++++ b/net/llc/llc_conn.c
3309 +@@ -734,6 +734,7 @@ void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk)
3310 + llc_sk(sk)->sap = sap;
3311 +
3312 + spin_lock_bh(&sap->sk_lock);
3313 ++ sock_set_flag(sk, SOCK_RCU_FREE);
3314 + sap->sk_count++;
3315 + sk_nulls_add_node_rcu(sk, laddr_hb);
3316 + hlist_add_head(&llc->dev_hash_node, dev_hb);
3317 +diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
3318 +index ee56f18cad3f..21526630bf65 100644
3319 +--- a/net/mac80211/mesh.h
3320 ++++ b/net/mac80211/mesh.h
3321 +@@ -217,7 +217,8 @@ void mesh_rmc_free(struct ieee80211_sub_if_data *sdata);
3322 + int mesh_rmc_init(struct ieee80211_sub_if_data *sdata);
3323 + void ieee80211s_init(void);
3324 + void ieee80211s_update_metric(struct ieee80211_local *local,
3325 +- struct sta_info *sta, struct sk_buff *skb);
3326 ++ struct sta_info *sta,
3327 ++ struct ieee80211_tx_status *st);
3328 + void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata);
3329 + void ieee80211_mesh_teardown_sdata(struct ieee80211_sub_if_data *sdata);
3330 + int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata);
3331 +diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
3332 +index daf9db3c8f24..6950cd0bf594 100644
3333 +--- a/net/mac80211/mesh_hwmp.c
3334 ++++ b/net/mac80211/mesh_hwmp.c
3335 +@@ -295,15 +295,12 @@ int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
3336 + }
3337 +
3338 + void ieee80211s_update_metric(struct ieee80211_local *local,
3339 +- struct sta_info *sta, struct sk_buff *skb)
3340 ++ struct sta_info *sta,
3341 ++ struct ieee80211_tx_status *st)
3342 + {
3343 +- struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
3344 +- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
3345 ++ struct ieee80211_tx_info *txinfo = st->info;
3346 + int failed;
3347 +
3348 +- if (!ieee80211_is_data(hdr->frame_control))
3349 +- return;
3350 +-
3351 + failed = !(txinfo->flags & IEEE80211_TX_STAT_ACK);
3352 +
3353 + /* moving average, scaled to 100.
3354 +diff --git a/net/mac80211/status.c b/net/mac80211/status.c
3355 +index 9a6d7208bf4f..91d7c0cd1882 100644
3356 +--- a/net/mac80211/status.c
3357 ++++ b/net/mac80211/status.c
3358 +@@ -479,11 +479,6 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local,
3359 + if (!skb)
3360 + return;
3361 +
3362 +- if (dropped) {
3363 +- dev_kfree_skb_any(skb);
3364 +- return;
3365 +- }
3366 +-
3367 + if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) {
3368 + u64 cookie = IEEE80211_SKB_CB(skb)->ack.cookie;
3369 + struct ieee80211_sub_if_data *sdata;
3370 +@@ -506,6 +501,8 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local,
3371 + }
3372 + rcu_read_unlock();
3373 +
3374 ++ dev_kfree_skb_any(skb);
3375 ++ } else if (dropped) {
3376 + dev_kfree_skb_any(skb);
3377 + } else {
3378 + /* consumes skb */
3379 +@@ -811,7 +808,7 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
3380 +
3381 + rate_control_tx_status(local, sband, status);
3382 + if (ieee80211_vif_is_mesh(&sta->sdata->vif))
3383 +- ieee80211s_update_metric(local, sta, skb);
3384 ++ ieee80211s_update_metric(local, sta, status);
3385 +
3386 + if (!(info->flags & IEEE80211_TX_CTL_INJECTED) && acked)
3387 + ieee80211_frame_acked(sta, skb);
3388 +@@ -972,6 +969,8 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
3389 + }
3390 +
3391 + rate_control_tx_status(local, sband, status);
3392 ++ if (ieee80211_vif_is_mesh(&sta->sdata->vif))
3393 ++ ieee80211s_update_metric(local, sta, status);
3394 + }
3395 +
3396 + if (acked || noack_success) {
3397 +diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
3398 +index 5cd5e6e5834e..6c647f425e05 100644
3399 +--- a/net/mac80211/tdls.c
3400 ++++ b/net/mac80211/tdls.c
3401 +@@ -16,6 +16,7 @@
3402 + #include "ieee80211_i.h"
3403 + #include "driver-ops.h"
3404 + #include "rate.h"
3405 ++#include "wme.h"
3406 +
3407 + /* give usermode some time for retries in setting up the TDLS session */
3408 + #define TDLS_PEER_SETUP_TIMEOUT (15 * HZ)
3409 +@@ -1010,14 +1011,13 @@ ieee80211_tdls_prep_mgmt_packet(struct wiphy *wiphy, struct net_device *dev,
3410 + switch (action_code) {
3411 + case WLAN_TDLS_SETUP_REQUEST:
3412 + case WLAN_TDLS_SETUP_RESPONSE:
3413 +- skb_set_queue_mapping(skb, IEEE80211_AC_BK);
3414 +- skb->priority = 2;
3415 ++ skb->priority = 256 + 2;
3416 + break;
3417 + default:
3418 +- skb_set_queue_mapping(skb, IEEE80211_AC_VI);
3419 +- skb->priority = 5;
3420 ++ skb->priority = 256 + 5;
3421 + break;
3422 + }
3423 ++ skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, skb));
3424 +
3425 + /*
3426 + * Set the WLAN_TDLS_TEARDOWN flag to indicate a teardown in progress.
3427 +diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
3428 +index 9b3b069e418a..361f2f6cc839 100644
3429 +--- a/net/mac80211/tx.c
3430 ++++ b/net/mac80211/tx.c
3431 +@@ -1886,7 +1886,7 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
3432 + sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
3433 +
3434 + if (invoke_tx_handlers_early(&tx))
3435 +- return false;
3436 ++ return true;
3437 +
3438 + if (ieee80211_queue_skb(local, sdata, tx.sta, tx.skb))
3439 + return true;
3440 +diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
3441 +index 8e67910185a0..1004fb5930de 100644
3442 +--- a/net/netfilter/nf_conntrack_proto_tcp.c
3443 ++++ b/net/netfilter/nf_conntrack_proto_tcp.c
3444 +@@ -1239,8 +1239,8 @@ static const struct nla_policy tcp_nla_policy[CTA_PROTOINFO_TCP_MAX+1] = {
3445 + #define TCP_NLATTR_SIZE ( \
3446 + NLA_ALIGN(NLA_HDRLEN + 1) + \
3447 + NLA_ALIGN(NLA_HDRLEN + 1) + \
3448 +- NLA_ALIGN(NLA_HDRLEN + sizeof(sizeof(struct nf_ct_tcp_flags))) + \
3449 +- NLA_ALIGN(NLA_HDRLEN + sizeof(sizeof(struct nf_ct_tcp_flags))))
3450 ++ NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)) + \
3451 ++ NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)))
3452 +
3453 + static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct)
3454 + {
3455 +diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
3456 +index 9873d734b494..8ad78b82c8e2 100644
3457 +--- a/net/netfilter/nft_set_rbtree.c
3458 ++++ b/net/netfilter/nft_set_rbtree.c
3459 +@@ -355,12 +355,11 @@ cont:
3460 +
3461 + static void nft_rbtree_gc(struct work_struct *work)
3462 + {
3463 ++ struct nft_rbtree_elem *rbe, *rbe_end = NULL, *rbe_prev = NULL;
3464 + struct nft_set_gc_batch *gcb = NULL;
3465 +- struct rb_node *node, *prev = NULL;
3466 +- struct nft_rbtree_elem *rbe;
3467 + struct nft_rbtree *priv;
3468 ++ struct rb_node *node;
3469 + struct nft_set *set;
3470 +- int i;
3471 +
3472 + priv = container_of(work, struct nft_rbtree, gc_work.work);
3473 + set = nft_set_container_of(priv);
3474 +@@ -371,7 +370,7 @@ static void nft_rbtree_gc(struct work_struct *work)
3475 + rbe = rb_entry(node, struct nft_rbtree_elem, node);
3476 +
3477 + if (nft_rbtree_interval_end(rbe)) {
3478 +- prev = node;
3479 ++ rbe_end = rbe;
3480 + continue;
3481 + }
3482 + if (!nft_set_elem_expired(&rbe->ext))
3483 +@@ -379,29 +378,30 @@ static void nft_rbtree_gc(struct work_struct *work)
3484 + if (nft_set_elem_mark_busy(&rbe->ext))
3485 + continue;
3486 +
3487 ++ if (rbe_prev) {
3488 ++ rb_erase(&rbe_prev->node, &priv->root);
3489 ++ rbe_prev = NULL;
3490 ++ }
3491 + gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
3492 + if (!gcb)
3493 + break;
3494 +
3495 + atomic_dec(&set->nelems);
3496 + nft_set_gc_batch_add(gcb, rbe);
3497 ++ rbe_prev = rbe;
3498 +
3499 +- if (prev) {
3500 +- rbe = rb_entry(prev, struct nft_rbtree_elem, node);
3501 ++ if (rbe_end) {
3502 + atomic_dec(&set->nelems);
3503 +- nft_set_gc_batch_add(gcb, rbe);
3504 +- prev = NULL;
3505 ++ nft_set_gc_batch_add(gcb, rbe_end);
3506 ++ rb_erase(&rbe_end->node, &priv->root);
3507 ++ rbe_end = NULL;
3508 + }
3509 + node = rb_next(node);
3510 + if (!node)
3511 + break;
3512 + }
3513 +- if (gcb) {
3514 +- for (i = 0; i < gcb->head.cnt; i++) {
3515 +- rbe = gcb->elems[i];
3516 +- rb_erase(&rbe->node, &priv->root);
3517 +- }
3518 +- }
3519 ++ if (rbe_prev)
3520 ++ rb_erase(&rbe_prev->node, &priv->root);
3521 + write_seqcount_end(&priv->count);
3522 + write_unlock_bh(&priv->lock);
3523 +
3524 +diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
3525 +index 492ab0c36f7c..8b1ba43b1ece 100644
3526 +--- a/net/openvswitch/flow_netlink.c
3527 ++++ b/net/openvswitch/flow_netlink.c
3528 +@@ -2990,7 +2990,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
3529 + * is already present */
3530 + if (mac_proto != MAC_PROTO_NONE)
3531 + return -EINVAL;
3532 +- mac_proto = MAC_PROTO_NONE;
3533 ++ mac_proto = MAC_PROTO_ETHERNET;
3534 + break;
3535 +
3536 + case OVS_ACTION_ATTR_POP_ETH:
3537 +@@ -2998,7 +2998,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
3538 + return -EINVAL;
3539 + if (vlan_tci & htons(VLAN_TAG_PRESENT))
3540 + return -EINVAL;
3541 +- mac_proto = MAC_PROTO_ETHERNET;
3542 ++ mac_proto = MAC_PROTO_NONE;
3543 + break;
3544 +
3545 + case OVS_ACTION_ATTR_PUSH_NSH:
3546 +diff --git a/net/rds/send.c b/net/rds/send.c
3547 +index 59f17a2335f4..0e54ca0f4e9e 100644
3548 +--- a/net/rds/send.c
3549 ++++ b/net/rds/send.c
3550 +@@ -1006,7 +1006,8 @@ static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
3551 + return ret;
3552 + }
3553 +
3554 +-static int rds_send_mprds_hash(struct rds_sock *rs, struct rds_connection *conn)
3555 ++static int rds_send_mprds_hash(struct rds_sock *rs,
3556 ++ struct rds_connection *conn, int nonblock)
3557 + {
3558 + int hash;
3559 +
3560 +@@ -1022,10 +1023,16 @@ static int rds_send_mprds_hash(struct rds_sock *rs, struct rds_connection *conn)
3561 + * used. But if we are interrupted, we have to use the zero
3562 + * c_path in case the connection ends up being non-MP capable.
3563 + */
3564 +- if (conn->c_npaths == 0)
3565 ++ if (conn->c_npaths == 0) {
3566 ++ /* Cannot wait for the connection to be made, so just use
3567 ++ * the base c_path.
3568 ++ */
3569 ++ if (nonblock)
3570 ++ return 0;
3571 + if (wait_event_interruptible(conn->c_hs_waitq,
3572 + conn->c_npaths != 0))
3573 + hash = 0;
3574 ++ }
3575 + if (conn->c_npaths == 1)
3576 + hash = 0;
3577 + }
3578 +@@ -1170,7 +1177,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
3579 + }
3580 +
3581 + if (conn->c_trans->t_mp_capable)
3582 +- cpath = &conn->c_path[rds_send_mprds_hash(rs, conn)];
3583 ++ cpath = &conn->c_path[rds_send_mprds_hash(rs, conn, nonblock)];
3584 + else
3585 + cpath = &conn->c_path[0];
3586 +
3587 +diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
3588 +index 707630ab4713..330372c04940 100644
3589 +--- a/net/rxrpc/ar-internal.h
3590 ++++ b/net/rxrpc/ar-internal.h
3591 +@@ -293,7 +293,6 @@ struct rxrpc_peer {
3592 + struct hlist_node hash_link;
3593 + struct rxrpc_local *local;
3594 + struct hlist_head error_targets; /* targets for net error distribution */
3595 +- struct work_struct error_distributor;
3596 + struct rb_root service_conns; /* Service connections */
3597 + struct list_head keepalive_link; /* Link in net->peer_keepalive[] */
3598 + time64_t last_tx_at; /* Last time packet sent here */
3599 +@@ -304,8 +303,6 @@ struct rxrpc_peer {
3600 + unsigned int maxdata; /* data size (MTU - hdrsize) */
3601 + unsigned short hdrsize; /* header size (IP + UDP + RxRPC) */
3602 + int debug_id; /* debug ID for printks */
3603 +- int error_report; /* Net (+0) or local (+1000000) to distribute */
3604 +-#define RXRPC_LOCAL_ERROR_OFFSET 1000000
3605 + struct sockaddr_rxrpc srx; /* remote address */
3606 +
3607 + /* calculated RTT cache */
3608 +@@ -449,8 +446,7 @@ struct rxrpc_connection {
3609 + spinlock_t state_lock; /* state-change lock */
3610 + enum rxrpc_conn_cache_state cache_state;
3611 + enum rxrpc_conn_proto_state state; /* current state of connection */
3612 +- u32 local_abort; /* local abort code */
3613 +- u32 remote_abort; /* remote abort code */
3614 ++ u32 abort_code; /* Abort code of connection abort */
3615 + int debug_id; /* debug ID for printks */
3616 + atomic_t serial; /* packet serial number counter */
3617 + unsigned int hi_serial; /* highest serial number received */
3618 +@@ -460,8 +456,19 @@ struct rxrpc_connection {
3619 + u8 security_size; /* security header size */
3620 + u8 security_ix; /* security type */
3621 + u8 out_clientflag; /* RXRPC_CLIENT_INITIATED if we are client */
3622 ++ short error; /* Local error code */
3623 + };
3624 +
3625 ++static inline bool rxrpc_to_server(const struct rxrpc_skb_priv *sp)
3626 ++{
3627 ++ return sp->hdr.flags & RXRPC_CLIENT_INITIATED;
3628 ++}
3629 ++
3630 ++static inline bool rxrpc_to_client(const struct rxrpc_skb_priv *sp)
3631 ++{
3632 ++ return !rxrpc_to_server(sp);
3633 ++}
3634 ++
3635 + /*
3636 + * Flags in call->flags.
3637 + */
3638 +@@ -1029,7 +1036,6 @@ void rxrpc_send_keepalive(struct rxrpc_peer *);
3639 + * peer_event.c
3640 + */
3641 + void rxrpc_error_report(struct sock *);
3642 +-void rxrpc_peer_error_distributor(struct work_struct *);
3643 + void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace,
3644 + rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);
3645 + void rxrpc_peer_keepalive_worker(struct work_struct *);
3646 +@@ -1048,7 +1054,6 @@ void rxrpc_destroy_all_peers(struct rxrpc_net *);
3647 + struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *);
3648 + struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *);
3649 + void rxrpc_put_peer(struct rxrpc_peer *);
3650 +-void __rxrpc_queue_peer_error(struct rxrpc_peer *);
3651 +
3652 + /*
3653 + * proc.c
3654 +diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
3655 +index 9d1e298b784c..0e378d73e856 100644
3656 +--- a/net/rxrpc/call_accept.c
3657 ++++ b/net/rxrpc/call_accept.c
3658 +@@ -422,11 +422,11 @@ found_service:
3659 +
3660 + case RXRPC_CONN_REMOTELY_ABORTED:
3661 + rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
3662 +- conn->remote_abort, -ECONNABORTED);
3663 ++ conn->abort_code, conn->error);
3664 + break;
3665 + case RXRPC_CONN_LOCALLY_ABORTED:
3666 + rxrpc_abort_call("CON", call, sp->hdr.seq,
3667 +- conn->local_abort, -ECONNABORTED);
3668 ++ conn->abort_code, conn->error);
3669 + break;
3670 + default:
3671 + BUG();
3672 +diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
3673 +index f6734d8cb01a..ed69257203c2 100644
3674 +--- a/net/rxrpc/call_object.c
3675 ++++ b/net/rxrpc/call_object.c
3676 +@@ -400,7 +400,7 @@ void rxrpc_incoming_call(struct rxrpc_sock *rx,
3677 + rcu_assign_pointer(conn->channels[chan].call, call);
3678 +
3679 + spin_lock(&conn->params.peer->lock);
3680 +- hlist_add_head(&call->error_link, &conn->params.peer->error_targets);
3681 ++ hlist_add_head_rcu(&call->error_link, &conn->params.peer->error_targets);
3682 + spin_unlock(&conn->params.peer->lock);
3683 +
3684 + _net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);
3685 +diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
3686 +index 5736f643c516..0be19132202b 100644
3687 +--- a/net/rxrpc/conn_client.c
3688 ++++ b/net/rxrpc/conn_client.c
3689 +@@ -709,8 +709,8 @@ int rxrpc_connect_call(struct rxrpc_call *call,
3690 + }
3691 +
3692 + spin_lock_bh(&call->conn->params.peer->lock);
3693 +- hlist_add_head(&call->error_link,
3694 +- &call->conn->params.peer->error_targets);
3695 ++ hlist_add_head_rcu(&call->error_link,
3696 ++ &call->conn->params.peer->error_targets);
3697 + spin_unlock_bh(&call->conn->params.peer->lock);
3698 +
3699 + out:
3700 +diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
3701 +index 3fde001fcc39..5e7c8239e703 100644
3702 +--- a/net/rxrpc/conn_event.c
3703 ++++ b/net/rxrpc/conn_event.c
3704 +@@ -126,7 +126,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
3705 +
3706 + switch (chan->last_type) {
3707 + case RXRPC_PACKET_TYPE_ABORT:
3708 +- _proto("Tx ABORT %%%u { %d } [re]", serial, conn->local_abort);
3709 ++ _proto("Tx ABORT %%%u { %d } [re]", serial, conn->abort_code);
3710 + break;
3711 + case RXRPC_PACKET_TYPE_ACK:
3712 + trace_rxrpc_tx_ack(NULL, serial, chan->last_seq, 0,
3713 +@@ -148,13 +148,12 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
3714 + * pass a connection-level abort onto all calls on that connection
3715 + */
3716 + static void rxrpc_abort_calls(struct rxrpc_connection *conn,
3717 +- enum rxrpc_call_completion compl,
3718 +- u32 abort_code, int error)
3719 ++ enum rxrpc_call_completion compl)
3720 + {
3721 + struct rxrpc_call *call;
3722 + int i;
3723 +
3724 +- _enter("{%d},%x", conn->debug_id, abort_code);
3725 ++ _enter("{%d},%x", conn->debug_id, conn->abort_code);
3726 +
3727 + spin_lock(&conn->channel_lock);
3728 +
3729 +@@ -167,9 +166,11 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn,
3730 + trace_rxrpc_abort(call->debug_id,
3731 + "CON", call->cid,
3732 + call->call_id, 0,
3733 +- abort_code, error);
3734 ++ conn->abort_code,
3735 ++ conn->error);
3736 + if (rxrpc_set_call_completion(call, compl,
3737 +- abort_code, error))
3738 ++ conn->abort_code,
3739 ++ conn->error))
3740 + rxrpc_notify_socket(call);
3741 + }
3742 + }
3743 +@@ -202,10 +203,12 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
3744 + return 0;
3745 + }
3746 +
3747 ++ conn->error = error;
3748 ++ conn->abort_code = abort_code;
3749 + conn->state = RXRPC_CONN_LOCALLY_ABORTED;
3750 + spin_unlock_bh(&conn->state_lock);
3751 +
3752 +- rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED, abort_code, error);
3753 ++ rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED);
3754 +
3755 + msg.msg_name = &conn->params.peer->srx.transport;
3756 + msg.msg_namelen = conn->params.peer->srx.transport_len;
3757 +@@ -224,7 +227,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
3758 + whdr._rsvd = 0;
3759 + whdr.serviceId = htons(conn->service_id);
3760 +
3761 +- word = htonl(conn->local_abort);
3762 ++ word = htonl(conn->abort_code);
3763 +
3764 + iov[0].iov_base = &whdr;
3765 + iov[0].iov_len = sizeof(whdr);
3766 +@@ -235,7 +238,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
3767 +
3768 + serial = atomic_inc_return(&conn->serial);
3769 + whdr.serial = htonl(serial);
3770 +- _proto("Tx CONN ABORT %%%u { %d }", serial, conn->local_abort);
3771 ++ _proto("Tx CONN ABORT %%%u { %d }", serial, conn->abort_code);
3772 +
3773 + ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
3774 + if (ret < 0) {
3775 +@@ -308,9 +311,10 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
3776 + abort_code = ntohl(wtmp);
3777 + _proto("Rx ABORT %%%u { ac=%d }", sp->hdr.serial, abort_code);
3778 +
3779 ++ conn->error = -ECONNABORTED;
3780 ++ conn->abort_code = abort_code;
3781 + conn->state = RXRPC_CONN_REMOTELY_ABORTED;
3782 +- rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED,
3783 +- abort_code, -ECONNABORTED);
3784 ++ rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED);
3785 + return -ECONNABORTED;
3786 +
3787 + case RXRPC_PACKET_TYPE_CHALLENGE:
3788 +diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
3789 +index 4c77a78a252a..e0d6d0fb7426 100644
3790 +--- a/net/rxrpc/conn_object.c
3791 ++++ b/net/rxrpc/conn_object.c
3792 +@@ -99,7 +99,7 @@ struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
3793 + k.epoch = sp->hdr.epoch;
3794 + k.cid = sp->hdr.cid & RXRPC_CIDMASK;
3795 +
3796 +- if (sp->hdr.flags & RXRPC_CLIENT_INITIATED) {
3797 ++ if (rxrpc_to_server(sp)) {
3798 + /* We need to look up service connections by the full protocol
3799 + * parameter set. We look up the peer first as an intermediate
3800 + * step and then the connection from the peer's tree.
3801 +@@ -214,7 +214,7 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
3802 + call->peer->cong_cwnd = call->cong_cwnd;
3803 +
3804 + spin_lock_bh(&conn->params.peer->lock);
3805 +- hlist_del_init(&call->error_link);
3806 ++ hlist_del_rcu(&call->error_link);
3807 + spin_unlock_bh(&conn->params.peer->lock);
3808 +
3809 + if (rxrpc_is_client_call(call))
3810 +diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
3811 +index 608d078a4981..a81240845224 100644
3812 +--- a/net/rxrpc/input.c
3813 ++++ b/net/rxrpc/input.c
3814 +@@ -216,10 +216,11 @@ static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb,
3815 + /*
3816 + * Apply a hard ACK by advancing the Tx window.
3817 + */
3818 +-static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
3819 ++static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
3820 + struct rxrpc_ack_summary *summary)
3821 + {
3822 + struct sk_buff *skb, *list = NULL;
3823 ++ bool rot_last = false;
3824 + int ix;
3825 + u8 annotation;
3826 +
3827 +@@ -243,15 +244,17 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
3828 + skb->next = list;
3829 + list = skb;
3830 +
3831 +- if (annotation & RXRPC_TX_ANNO_LAST)
3832 ++ if (annotation & RXRPC_TX_ANNO_LAST) {
3833 + set_bit(RXRPC_CALL_TX_LAST, &call->flags);
3834 ++ rot_last = true;
3835 ++ }
3836 + if ((annotation & RXRPC_TX_ANNO_MASK) != RXRPC_TX_ANNO_ACK)
3837 + summary->nr_rot_new_acks++;
3838 + }
3839 +
3840 + spin_unlock(&call->lock);
3841 +
3842 +- trace_rxrpc_transmit(call, (test_bit(RXRPC_CALL_TX_LAST, &call->flags) ?
3843 ++ trace_rxrpc_transmit(call, (rot_last ?
3844 + rxrpc_transmit_rotate_last :
3845 + rxrpc_transmit_rotate));
3846 + wake_up(&call->waitq);
3847 +@@ -262,6 +265,8 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
3848 + skb->next = NULL;
3849 + rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
3850 + }
3851 ++
3852 ++ return rot_last;
3853 + }
3854 +
3855 + /*
3856 +@@ -273,23 +278,26 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
3857 + static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
3858 + const char *abort_why)
3859 + {
3860 ++ unsigned int state;
3861 +
3862 + ASSERT(test_bit(RXRPC_CALL_TX_LAST, &call->flags));
3863 +
3864 + write_lock(&call->state_lock);
3865 +
3866 +- switch (call->state) {
3867 ++ state = call->state;
3868 ++ switch (state) {
3869 + case RXRPC_CALL_CLIENT_SEND_REQUEST:
3870 + case RXRPC_CALL_CLIENT_AWAIT_REPLY:
3871 + if (reply_begun)
3872 +- call->state = RXRPC_CALL_CLIENT_RECV_REPLY;
3873 ++ call->state = state = RXRPC_CALL_CLIENT_RECV_REPLY;
3874 + else
3875 +- call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
3876 ++ call->state = state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
3877 + break;
3878 +
3879 + case RXRPC_CALL_SERVER_AWAIT_ACK:
3880 + __rxrpc_call_completed(call);
3881 + rxrpc_notify_socket(call);
3882 ++ state = call->state;
3883 + break;
3884 +
3885 + default:
3886 +@@ -297,11 +305,10 @@ static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
3887 + }
3888 +
3889 + write_unlock(&call->state_lock);
3890 +- if (call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY) {
3891 ++ if (state == RXRPC_CALL_CLIENT_AWAIT_REPLY)
3892 + trace_rxrpc_transmit(call, rxrpc_transmit_await_reply);
3893 +- } else {
3894 ++ else
3895 + trace_rxrpc_transmit(call, rxrpc_transmit_end);
3896 +- }
3897 + _leave(" = ok");
3898 + return true;
3899 +
3900 +@@ -332,11 +339,11 @@ static bool rxrpc_receiving_reply(struct rxrpc_call *call)
3901 + trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now);
3902 + }
3903 +
3904 +- if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags))
3905 +- rxrpc_rotate_tx_window(call, top, &summary);
3906 + if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
3907 +- rxrpc_proto_abort("TXL", call, top);
3908 +- return false;
3909 ++ if (!rxrpc_rotate_tx_window(call, top, &summary)) {
3910 ++ rxrpc_proto_abort("TXL", call, top);
3911 ++ return false;
3912 ++ }
3913 + }
3914 + if (!rxrpc_end_tx_phase(call, true, "ETD"))
3915 + return false;
3916 +@@ -616,13 +623,14 @@ static void rxrpc_input_requested_ack(struct rxrpc_call *call,
3917 + if (!skb)
3918 + continue;
3919 +
3920 ++ sent_at = skb->tstamp;
3921 ++ smp_rmb(); /* Read timestamp before serial. */
3922 + sp = rxrpc_skb(skb);
3923 + if (sp->hdr.serial != orig_serial)
3924 + continue;
3925 +- smp_rmb();
3926 +- sent_at = skb->tstamp;
3927 + goto found;
3928 + }
3929 ++
3930 + return;
3931 +
3932 + found:
3933 +@@ -854,6 +862,16 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
3934 + rxrpc_propose_ack_respond_to_ack);
3935 + }
3936 +
3937 ++ /* Discard any out-of-order or duplicate ACKs. */
3938 ++ if (before_eq(sp->hdr.serial, call->acks_latest)) {
3939 ++ _debug("discard ACK %d <= %d",
3940 ++ sp->hdr.serial, call->acks_latest);
3941 ++ return;
3942 ++ }
3943 ++ call->acks_latest_ts = skb->tstamp;
3944 ++ call->acks_latest = sp->hdr.serial;
3945 ++
3946 ++ /* Parse rwind and mtu sizes if provided. */
3947 + ioffset = offset + nr_acks + 3;
3948 + if (skb->len >= ioffset + sizeof(buf.info)) {
3949 + if (skb_copy_bits(skb, ioffset, &buf.info, sizeof(buf.info)) < 0)
3950 +@@ -875,23 +893,18 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
3951 + return;
3952 + }
3953 +
3954 +- /* Discard any out-of-order or duplicate ACKs. */
3955 +- if (before_eq(sp->hdr.serial, call->acks_latest)) {
3956 +- _debug("discard ACK %d <= %d",
3957 +- sp->hdr.serial, call->acks_latest);
3958 +- return;
3959 +- }
3960 +- call->acks_latest_ts = skb->tstamp;
3961 +- call->acks_latest = sp->hdr.serial;
3962 +-
3963 + if (before(hard_ack, call->tx_hard_ack) ||
3964 + after(hard_ack, call->tx_top))
3965 + return rxrpc_proto_abort("AKW", call, 0);
3966 + if (nr_acks > call->tx_top - hard_ack)
3967 + return rxrpc_proto_abort("AKN", call, 0);
3968 +
3969 +- if (after(hard_ack, call->tx_hard_ack))
3970 +- rxrpc_rotate_tx_window(call, hard_ack, &summary);
3971 ++ if (after(hard_ack, call->tx_hard_ack)) {
3972 ++ if (rxrpc_rotate_tx_window(call, hard_ack, &summary)) {
3973 ++ rxrpc_end_tx_phase(call, false, "ETA");
3974 ++ return;
3975 ++ }
3976 ++ }
3977 +
3978 + if (nr_acks > 0) {
3979 + if (skb_copy_bits(skb, offset, buf.acks, nr_acks) < 0)
3980 +@@ -900,11 +913,6 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
3981 + &summary);
3982 + }
3983 +
3984 +- if (test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
3985 +- rxrpc_end_tx_phase(call, false, "ETA");
3986 +- return;
3987 +- }
3988 +-
3989 + if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] &
3990 + RXRPC_TX_ANNO_LAST &&
3991 + summary.nr_acks == call->tx_top - hard_ack &&
3992 +@@ -926,8 +934,7 @@ static void rxrpc_input_ackall(struct rxrpc_call *call, struct sk_buff *skb)
3993 +
3994 + _proto("Rx ACKALL %%%u", sp->hdr.serial);
3995 +
3996 +- rxrpc_rotate_tx_window(call, call->tx_top, &summary);
3997 +- if (test_bit(RXRPC_CALL_TX_LAST, &call->flags))
3998 ++ if (rxrpc_rotate_tx_window(call, call->tx_top, &summary))
3999 + rxrpc_end_tx_phase(call, false, "ETL");
4000 + }
4001 +
4002 +@@ -1137,6 +1144,9 @@ void rxrpc_data_ready(struct sock *udp_sk)
4003 + return;
4004 + }
4005 +
4006 ++ if (skb->tstamp == 0)
4007 ++ skb->tstamp = ktime_get_real();
4008 ++
4009 + rxrpc_new_skb(skb, rxrpc_skb_rx_received);
4010 +
4011 + _net("recv skb %p", skb);
4012 +@@ -1171,10 +1181,6 @@ void rxrpc_data_ready(struct sock *udp_sk)
4013 +
4014 + trace_rxrpc_rx_packet(sp);
4015 +
4016 +- _net("Rx RxRPC %s ep=%x call=%x:%x",
4017 +- sp->hdr.flags & RXRPC_CLIENT_INITIATED ? "ToServer" : "ToClient",
4018 +- sp->hdr.epoch, sp->hdr.cid, sp->hdr.callNumber);
4019 +-
4020 + if (sp->hdr.type >= RXRPC_N_PACKET_TYPES ||
4021 + !((RXRPC_SUPPORTED_PACKET_TYPES >> sp->hdr.type) & 1)) {
4022 + _proto("Rx Bad Packet Type %u", sp->hdr.type);
4023 +@@ -1183,13 +1189,13 @@ void rxrpc_data_ready(struct sock *udp_sk)
4024 +
4025 + switch (sp->hdr.type) {
4026 + case RXRPC_PACKET_TYPE_VERSION:
4027 +- if (!(sp->hdr.flags & RXRPC_CLIENT_INITIATED))
4028 ++ if (rxrpc_to_client(sp))
4029 + goto discard;
4030 + rxrpc_post_packet_to_local(local, skb);
4031 + goto out;
4032 +
4033 + case RXRPC_PACKET_TYPE_BUSY:
4034 +- if (sp->hdr.flags & RXRPC_CLIENT_INITIATED)
4035 ++ if (rxrpc_to_server(sp))
4036 + goto discard;
4037 + /* Fall through */
4038 +
4039 +@@ -1269,7 +1275,7 @@ void rxrpc_data_ready(struct sock *udp_sk)
4040 + call = rcu_dereference(chan->call);
4041 +
4042 + if (sp->hdr.callNumber > chan->call_id) {
4043 +- if (!(sp->hdr.flags & RXRPC_CLIENT_INITIATED)) {
4044 ++ if (rxrpc_to_client(sp)) {
4045 + rcu_read_unlock();
4046 + goto reject_packet;
4047 + }
4048 +@@ -1292,7 +1298,7 @@ void rxrpc_data_ready(struct sock *udp_sk)
4049 + }
4050 +
4051 + if (!call || atomic_read(&call->usage) == 0) {
4052 +- if (!(sp->hdr.type & RXRPC_CLIENT_INITIATED) ||
4053 ++ if (rxrpc_to_client(sp) ||
4054 + sp->hdr.callNumber == 0 ||
4055 + sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
4056 + goto bad_message_unlock;
4057 +diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
4058 +index b493e6b62740..386dc1f20c73 100644
4059 +--- a/net/rxrpc/local_object.c
4060 ++++ b/net/rxrpc/local_object.c
4061 +@@ -135,10 +135,10 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
4062 + }
4063 +
4064 + switch (local->srx.transport.family) {
4065 +- case AF_INET:
4066 +- /* we want to receive ICMP errors */
4067 ++ case AF_INET6:
4068 ++ /* we want to receive ICMPv6 errors */
4069 + opt = 1;
4070 +- ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
4071 ++ ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_RECVERR,
4072 + (char *) &opt, sizeof(opt));
4073 + if (ret < 0) {
4074 + _debug("setsockopt failed");
4075 +@@ -146,19 +146,22 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
4076 + }
4077 +
4078 + /* we want to set the don't fragment bit */
4079 +- opt = IP_PMTUDISC_DO;
4080 +- ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
4081 ++ opt = IPV6_PMTUDISC_DO;
4082 ++ ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_MTU_DISCOVER,
4083 + (char *) &opt, sizeof(opt));
4084 + if (ret < 0) {
4085 + _debug("setsockopt failed");
4086 + goto error;
4087 + }
4088 +- break;
4089 +
4090 +- case AF_INET6:
4091 ++ /* Fall through and set IPv4 options too, otherwise we don't get
4092 ++ * errors from IPv4 packets sent through the IPv6 socket.
4093 ++ */
4094 ++
4095 ++ case AF_INET:
4096 + /* we want to receive ICMP errors */
4097 + opt = 1;
4098 +- ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_RECVERR,
4099 ++ ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
4100 + (char *) &opt, sizeof(opt));
4101 + if (ret < 0) {
4102 + _debug("setsockopt failed");
4103 +@@ -166,13 +169,22 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
4104 + }
4105 +
4106 + /* we want to set the don't fragment bit */
4107 +- opt = IPV6_PMTUDISC_DO;
4108 +- ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_MTU_DISCOVER,
4109 ++ opt = IP_PMTUDISC_DO;
4110 ++ ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
4111 + (char *) &opt, sizeof(opt));
4112 + if (ret < 0) {
4113 + _debug("setsockopt failed");
4114 + goto error;
4115 + }
4116 ++
4117 ++ /* We want receive timestamps. */
4118 ++ opt = 1;
4119 ++ ret = kernel_setsockopt(local->socket, SOL_SOCKET, SO_TIMESTAMPNS,
4120 ++ (char *)&opt, sizeof(opt));
4121 ++ if (ret < 0) {
4122 ++ _debug("setsockopt failed");
4123 ++ goto error;
4124 ++ }
4125 + break;
4126 +
4127 + default:
4128 +diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
4129 +index 4774c8f5634d..6ac21bb2071d 100644
4130 +--- a/net/rxrpc/output.c
4131 ++++ b/net/rxrpc/output.c
4132 +@@ -124,7 +124,6 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
4133 + struct kvec iov[2];
4134 + rxrpc_serial_t serial;
4135 + rxrpc_seq_t hard_ack, top;
4136 +- ktime_t now;
4137 + size_t len, n;
4138 + int ret;
4139 + u8 reason;
4140 +@@ -196,9 +195,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
4141 + /* We need to stick a time in before we send the packet in case
4142 + * the reply gets back before kernel_sendmsg() completes - but
4143 + * asking UDP to send the packet can take a relatively long
4144 +- * time, so we update the time after, on the assumption that
4145 +- * the packet transmission is more likely to happen towards the
4146 +- * end of the kernel_sendmsg() call.
4147 ++ * time.
4148 + */
4149 + call->ping_time = ktime_get_real();
4150 + set_bit(RXRPC_CALL_PINGING, &call->flags);
4151 +@@ -206,9 +203,6 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
4152 + }
4153 +
4154 + ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
4155 +- now = ktime_get_real();
4156 +- if (ping)
4157 +- call->ping_time = now;
4158 + conn->params.peer->last_tx_at = ktime_get_seconds();
4159 + if (ret < 0)
4160 + trace_rxrpc_tx_fail(call->debug_id, serial, ret,
4161 +@@ -357,8 +351,14 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
4162 +
4163 + /* If our RTT cache needs working on, request an ACK. Also request
4164 + * ACKs if a DATA packet appears to have been lost.
4165 ++ *
4166 ++ * However, we mustn't request an ACK on the last reply packet of a
4167 ++ * service call, lest OpenAFS incorrectly send us an ACK with some
4168 ++ * soft-ACKs in it and then never follow up with a proper hard ACK.
4169 + */
4170 +- if (!(sp->hdr.flags & RXRPC_LAST_PACKET) &&
4171 ++ if ((!(sp->hdr.flags & RXRPC_LAST_PACKET) ||
4172 ++ rxrpc_to_server(sp)
4173 ++ ) &&
4174 + (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events) ||
4175 + retrans ||
4176 + call->cong_mode == RXRPC_CALL_SLOW_START ||
4177 +@@ -384,6 +384,11 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
4178 + goto send_fragmentable;
4179 +
4180 + down_read(&conn->params.local->defrag_sem);
4181 ++
4182 ++ sp->hdr.serial = serial;
4183 ++ smp_wmb(); /* Set serial before timestamp */
4184 ++ skb->tstamp = ktime_get_real();
4185 ++
4186 + /* send the packet by UDP
4187 + * - returns -EMSGSIZE if UDP would have to fragment the packet
4188 + * to go out of the interface
4189 +@@ -404,12 +409,8 @@ done:
4190 + trace_rxrpc_tx_data(call, sp->hdr.seq, serial, whdr.flags,
4191 + retrans, lost);
4192 + if (ret >= 0) {
4193 +- ktime_t now = ktime_get_real();
4194 +- skb->tstamp = now;
4195 +- smp_wmb();
4196 +- sp->hdr.serial = serial;
4197 + if (whdr.flags & RXRPC_REQUEST_ACK) {
4198 +- call->peer->rtt_last_req = now;
4199 ++ call->peer->rtt_last_req = skb->tstamp;
4200 + trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial);
4201 + if (call->peer->rtt_usage > 1) {
4202 + unsigned long nowj = jiffies, ack_lost_at;
4203 +@@ -448,6 +449,10 @@ send_fragmentable:
4204 +
4205 + down_write(&conn->params.local->defrag_sem);
4206 +
4207 ++ sp->hdr.serial = serial;
4208 ++ smp_wmb(); /* Set serial before timestamp */
4209 ++ skb->tstamp = ktime_get_real();
4210 ++
4211 + switch (conn->params.local->srx.transport.family) {
4212 + case AF_INET:
4213 + opt = IP_PMTUDISC_DONT;
4214 +diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
4215 +index 4f9da2f51c69..f3e6fc670da2 100644
4216 +--- a/net/rxrpc/peer_event.c
4217 ++++ b/net/rxrpc/peer_event.c
4218 +@@ -23,6 +23,8 @@
4219 + #include "ar-internal.h"
4220 +
4221 + static void rxrpc_store_error(struct rxrpc_peer *, struct sock_exterr_skb *);
4222 ++static void rxrpc_distribute_error(struct rxrpc_peer *, int,
4223 ++ enum rxrpc_call_completion);
4224 +
4225 + /*
4226 + * Find the peer associated with an ICMP packet.
4227 +@@ -194,8 +196,6 @@ void rxrpc_error_report(struct sock *sk)
4228 + rcu_read_unlock();
4229 + rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
4230 +
4231 +- /* The ref we obtained is passed off to the work item */
4232 +- __rxrpc_queue_peer_error(peer);
4233 + _leave("");
4234 + }
4235 +
4236 +@@ -205,6 +205,7 @@ void rxrpc_error_report(struct sock *sk)
4237 + static void rxrpc_store_error(struct rxrpc_peer *peer,
4238 + struct sock_exterr_skb *serr)
4239 + {
4240 ++ enum rxrpc_call_completion compl = RXRPC_CALL_NETWORK_ERROR;
4241 + struct sock_extended_err *ee;
4242 + int err;
4243 +
4244 +@@ -255,7 +256,7 @@ static void rxrpc_store_error(struct rxrpc_peer *peer,
4245 + case SO_EE_ORIGIN_NONE:
4246 + case SO_EE_ORIGIN_LOCAL:
4247 + _proto("Rx Received local error { error=%d }", err);
4248 +- err += RXRPC_LOCAL_ERROR_OFFSET;
4249 ++ compl = RXRPC_CALL_LOCAL_ERROR;
4250 + break;
4251 +
4252 + case SO_EE_ORIGIN_ICMP6:
4253 +@@ -264,48 +265,23 @@ static void rxrpc_store_error(struct rxrpc_peer *peer,
4254 + break;
4255 + }
4256 +
4257 +- peer->error_report = err;
4258 ++ rxrpc_distribute_error(peer, err, compl);
4259 + }
4260 +
4261 + /*
4262 +- * Distribute an error that occurred on a peer
4263 ++ * Distribute an error that occurred on a peer.
4264 + */
4265 +-void rxrpc_peer_error_distributor(struct work_struct *work)
4266 ++static void rxrpc_distribute_error(struct rxrpc_peer *peer, int error,
4267 ++ enum rxrpc_call_completion compl)
4268 + {
4269 +- struct rxrpc_peer *peer =
4270 +- container_of(work, struct rxrpc_peer, error_distributor);
4271 + struct rxrpc_call *call;
4272 +- enum rxrpc_call_completion compl;
4273 +- int error;
4274 +-
4275 +- _enter("");
4276 +-
4277 +- error = READ_ONCE(peer->error_report);
4278 +- if (error < RXRPC_LOCAL_ERROR_OFFSET) {
4279 +- compl = RXRPC_CALL_NETWORK_ERROR;
4280 +- } else {
4281 +- compl = RXRPC_CALL_LOCAL_ERROR;
4282 +- error -= RXRPC_LOCAL_ERROR_OFFSET;
4283 +- }
4284 +
4285 +- _debug("ISSUE ERROR %s %d", rxrpc_call_completions[compl], error);
4286 +-
4287 +- spin_lock_bh(&peer->lock);
4288 +-
4289 +- while (!hlist_empty(&peer->error_targets)) {
4290 +- call = hlist_entry(peer->error_targets.first,
4291 +- struct rxrpc_call, error_link);
4292 +- hlist_del_init(&call->error_link);
4293 ++ hlist_for_each_entry_rcu(call, &peer->error_targets, error_link) {
4294 + rxrpc_see_call(call);
4295 +-
4296 +- if (rxrpc_set_call_completion(call, compl, 0, -error))
4297 ++ if (call->state < RXRPC_CALL_COMPLETE &&
4298 ++ rxrpc_set_call_completion(call, compl, 0, -error))
4299 + rxrpc_notify_socket(call);
4300 + }
4301 +-
4302 +- spin_unlock_bh(&peer->lock);
4303 +-
4304 +- rxrpc_put_peer(peer);
4305 +- _leave("");
4306 + }
4307 +
4308 + /*
4309 +diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
4310 +index 24ec7cdcf332..ef4c2e8a35cc 100644
4311 +--- a/net/rxrpc/peer_object.c
4312 ++++ b/net/rxrpc/peer_object.c
4313 +@@ -222,8 +222,6 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp)
4314 + atomic_set(&peer->usage, 1);
4315 + peer->local = local;
4316 + INIT_HLIST_HEAD(&peer->error_targets);
4317 +- INIT_WORK(&peer->error_distributor,
4318 +- &rxrpc_peer_error_distributor);
4319 + peer->service_conns = RB_ROOT;
4320 + seqlock_init(&peer->service_conn_lock);
4321 + spin_lock_init(&peer->lock);
4322 +@@ -415,21 +413,6 @@ struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer)
4323 + return peer;
4324 + }
4325 +
4326 +-/*
4327 +- * Queue a peer record. This passes the caller's ref to the workqueue.
4328 +- */
4329 +-void __rxrpc_queue_peer_error(struct rxrpc_peer *peer)
4330 +-{
4331 +- const void *here = __builtin_return_address(0);
4332 +- int n;
4333 +-
4334 +- n = atomic_read(&peer->usage);
4335 +- if (rxrpc_queue_work(&peer->error_distributor))
4336 +- trace_rxrpc_peer(peer, rxrpc_peer_queued_error, n, here);
4337 +- else
4338 +- rxrpc_put_peer(peer);
4339 +-}
4340 +-
4341 + /*
4342 + * Discard a peer record.
4343 + */
4344 +diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
4345 +index f74513a7c7a8..c855fd045a3c 100644
4346 +--- a/net/sched/cls_api.c
4347 ++++ b/net/sched/cls_api.c
4348 +@@ -31,6 +31,8 @@
4349 + #include <net/pkt_sched.h>
4350 + #include <net/pkt_cls.h>
4351 +
4352 ++extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
4353 ++
4354 + /* The list of all installed classifier types */
4355 + static LIST_HEAD(tcf_proto_base);
4356 +
4357 +@@ -1083,7 +1085,7 @@ static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
4358 + replay:
4359 + tp_created = 0;
4360 +
4361 +- err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack);
4362 ++ err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
4363 + if (err < 0)
4364 + return err;
4365 +
4366 +@@ -1226,7 +1228,7 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
4367 + if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
4368 + return -EPERM;
4369 +
4370 +- err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack);
4371 ++ err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
4372 + if (err < 0)
4373 + return err;
4374 +
4375 +@@ -1334,7 +1336,7 @@ static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
4376 + void *fh = NULL;
4377 + int err;
4378 +
4379 +- err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack);
4380 ++ err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
4381 + if (err < 0)
4382 + return err;
4383 +
4384 +@@ -1488,7 +1490,8 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
4385 + if (nlmsg_len(cb->nlh) < sizeof(*tcm))
4386 + return skb->len;
4387 +
4388 +- err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, NULL, NULL);
4389 ++ err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
4390 ++ NULL);
4391 + if (err)
4392 + return err;
4393 +
4394 +diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
4395 +index 99cc25aae503..57f71765febe 100644
4396 +--- a/net/sched/sch_api.c
4397 ++++ b/net/sched/sch_api.c
4398 +@@ -2052,7 +2052,8 @@ static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
4399 +
4400 + if (tcm->tcm_parent) {
4401 + q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent));
4402 +- if (q && tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
4403 ++ if (q && q != root &&
4404 ++ tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
4405 + return -1;
4406 + return 0;
4407 + }
4408 +diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
4409 +index cbe4831f46f4..4a042abf844c 100644
4410 +--- a/net/sched/sch_gred.c
4411 ++++ b/net/sched/sch_gred.c
4412 +@@ -413,7 +413,7 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt,
4413 + if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL) {
4414 + if (tb[TCA_GRED_LIMIT] != NULL)
4415 + sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
4416 +- return gred_change_table_def(sch, opt);
4417 ++ return gred_change_table_def(sch, tb[TCA_GRED_DPS]);
4418 + }
4419 +
4420 + if (tb[TCA_GRED_PARMS] == NULL ||
4421 +diff --git a/net/sctp/socket.c b/net/sctp/socket.c
4422 +index 50ee07cd20c4..9d903b870790 100644
4423 +--- a/net/sctp/socket.c
4424 ++++ b/net/sctp/socket.c
4425 +@@ -270,11 +270,10 @@ struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id)
4426 +
4427 + spin_lock_bh(&sctp_assocs_id_lock);
4428 + asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id);
4429 ++ if (asoc && (asoc->base.sk != sk || asoc->base.dead))
4430 ++ asoc = NULL;
4431 + spin_unlock_bh(&sctp_assocs_id_lock);
4432 +
4433 +- if (!asoc || (asoc->base.sk != sk) || asoc->base.dead)
4434 +- return NULL;
4435 +-
4436 + return asoc;
4437 + }
4438 +
4439 +@@ -1940,8 +1939,10 @@ static int sctp_sendmsg_to_asoc(struct sctp_association *asoc,
4440 + if (sp->strm_interleave) {
4441 + timeo = sock_sndtimeo(sk, 0);
4442 + err = sctp_wait_for_connect(asoc, &timeo);
4443 +- if (err)
4444 ++ if (err) {
4445 ++ err = -ESRCH;
4446 + goto err;
4447 ++ }
4448 + } else {
4449 + wait_connect = true;
4450 + }
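
The sctp_id2assoc() change moves the sanity checks (owning socket, dead flag) inside sctp_assocs_id_lock: checking after the lock is dropped leaves a window in which teardown on another CPU can invalidate the association between the check and the use. A hedged sketch of the check-under-lock pattern with a pthread mutex (hypothetical registry, not SCTP code):

    #include <pthread.h>
    #include <stddef.h>
    #include <stdio.h>

    struct assoc {
        int owner;
        int dead;
    };

    static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct assoc *registry[8];

    /* Validate under the same lock that teardown takes, so a stale or
     * foreign entry is rejected before the lock is released. */
    static struct assoc *lookup(int id, int me)
    {
        struct assoc *a;

        pthread_mutex_lock(&registry_lock);
        a = (id >= 0 && id < 8) ? registry[id] : NULL;
        if (a && (a->owner != me || a->dead))
            a = NULL;   /* reject under the lock, not after it */
        pthread_mutex_unlock(&registry_lock);
        return a;
    }

    int main(void)
    {
        static struct assoc a = { .owner = 42, .dead = 0 };

        registry[3] = &a;
        printf("%p\n", (void *)lookup(3, 42)); /* found */
        printf("%p\n", (void *)lookup(3, 7));  /* wrong owner: NULL */
        return 0;
    }
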
4451 +diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
4452 +index add82b0266f3..3be95f77ec7f 100644
4453 +--- a/net/smc/smc_core.c
4454 ++++ b/net/smc/smc_core.c
4455 +@@ -114,22 +114,17 @@ static void __smc_lgr_unregister_conn(struct smc_connection *conn)
4456 + sock_put(&smc->sk); /* sock_hold in smc_lgr_register_conn() */
4457 + }
4458 +
4459 +-/* Unregister connection and trigger lgr freeing if applicable
4460 ++/* Unregister connection from lgr
4461 + */
4462 + static void smc_lgr_unregister_conn(struct smc_connection *conn)
4463 + {
4464 + struct smc_link_group *lgr = conn->lgr;
4465 +- int reduced = 0;
4466 +
4467 + write_lock_bh(&lgr->conns_lock);
4468 + if (conn->alert_token_local) {
4469 +- reduced = 1;
4470 + __smc_lgr_unregister_conn(conn);
4471 + }
4472 + write_unlock_bh(&lgr->conns_lock);
4473 +- if (!reduced || lgr->conns_num)
4474 +- return;
4475 +- smc_lgr_schedule_free_work(lgr);
4476 + }
4477 +
4478 + static void smc_lgr_free_work(struct work_struct *work)
4479 +@@ -238,7 +233,8 @@ out:
4480 + return rc;
4481 + }
4482 +
4483 +-static void smc_buf_unuse(struct smc_connection *conn)
4484 ++static void smc_buf_unuse(struct smc_connection *conn,
4485 ++ struct smc_link_group *lgr)
4486 + {
4487 + if (conn->sndbuf_desc)
4488 + conn->sndbuf_desc->used = 0;
4489 +@@ -248,8 +244,6 @@ static void smc_buf_unuse(struct smc_connection *conn)
4490 + conn->rmb_desc->used = 0;
4491 + } else {
4492 + /* buf registration failed, reuse not possible */
4493 +- struct smc_link_group *lgr = conn->lgr;
4494 +-
4495 + write_lock_bh(&lgr->rmbs_lock);
4496 + list_del(&conn->rmb_desc->list);
4497 + write_unlock_bh(&lgr->rmbs_lock);
4498 +@@ -262,11 +256,16 @@ static void smc_buf_unuse(struct smc_connection *conn)
4499 + /* remove a finished connection from its link group */
4500 + void smc_conn_free(struct smc_connection *conn)
4501 + {
4502 +- if (!conn->lgr)
4503 ++ struct smc_link_group *lgr = conn->lgr;
4504 ++
4505 ++ if (!lgr)
4506 + return;
4507 + smc_cdc_tx_dismiss_slots(conn);
4508 +- smc_lgr_unregister_conn(conn);
4509 +- smc_buf_unuse(conn);
4510 ++ smc_lgr_unregister_conn(conn); /* unsets conn->lgr */
4511 ++ smc_buf_unuse(conn, lgr); /* allow buffer reuse */
4512 ++
4513 ++ if (!lgr->conns_num)
4514 ++ smc_lgr_schedule_free_work(lgr);
4515 + }
4516 +
4517 + static void smc_link_clear(struct smc_link *lnk)
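
The smc_conn_free() rework captures conn->lgr in a local before smc_lgr_unregister_conn() unlinks the connection, then schedules link-group freeing only once conns_num drops to zero. A sketch of that ordering (hypothetical, simplified group lifetime; the real code defers the free to a workqueue):

    #include <stdio.h>
    #include <stdlib.h>

    struct group {
        int conns;
    };

    struct conn {
        struct group *grp;
    };

    static void unregister_conn(struct conn *c)
    {
        c->grp->conns--;
        c->grp = NULL;  /* the back-pointer goes away here... */
    }

    static void conn_free(struct conn *c)
    {
        struct group *g = c->grp;   /* ...so capture it first */

        if (!g)
            return;
        unregister_conn(c);
        /* buffer reuse etc. would use g, never c->grp, from here on */
        if (g->conns == 0) {
            printf("freeing group\n");
            free(g);
        }
    }

    int main(void)
    {
        struct group *g = malloc(sizeof(*g));
        struct conn c = { .grp = g };

        g->conns = 1;
        conn_free(&c);
        return 0;
    }
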
4518 +diff --git a/net/socket.c b/net/socket.c
4519 +index d4187ac17d55..fcb18a7ed14b 100644
4520 +--- a/net/socket.c
4521 ++++ b/net/socket.c
4522 +@@ -2887,9 +2887,14 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
4523 + copy_in_user(&rxnfc->fs.ring_cookie,
4524 + &compat_rxnfc->fs.ring_cookie,
4525 + (void __user *)(&rxnfc->fs.location + 1) -
4526 +- (void __user *)&rxnfc->fs.ring_cookie) ||
4527 +- copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
4528 +- sizeof(rxnfc->rule_cnt)))
4529 ++ (void __user *)&rxnfc->fs.ring_cookie))
4530 ++ return -EFAULT;
4531 ++ if (ethcmd == ETHTOOL_GRXCLSRLALL) {
4532 ++ if (put_user(rule_cnt, &rxnfc->rule_cnt))
4533 ++ return -EFAULT;
4534 ++ } else if (copy_in_user(&rxnfc->rule_cnt,
4535 ++ &compat_rxnfc->rule_cnt,
4536 ++ sizeof(rxnfc->rule_cnt)))
4537 + return -EFAULT;
4538 + }
4539 +
4540 +diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
4541 +index 51b4b96f89db..3cfeb9df64b0 100644
4542 +--- a/net/tipc/name_distr.c
4543 ++++ b/net/tipc/name_distr.c
4544 +@@ -115,7 +115,7 @@ struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ)
4545 + struct sk_buff *buf;
4546 + struct distr_item *item;
4547 +
4548 +- list_del(&publ->binding_node);
4549 ++ list_del_rcu(&publ->binding_node);
4550 +
4551 + if (publ->scope == TIPC_NODE_SCOPE)
4552 + return NULL;
4553 +@@ -147,7 +147,7 @@ static void named_distribute(struct net *net, struct sk_buff_head *list,
4554 + ITEM_SIZE) * ITEM_SIZE;
4555 + u32 msg_rem = msg_dsz;
4556 +
4557 +- list_for_each_entry(publ, pls, binding_node) {
4558 ++ list_for_each_entry_rcu(publ, pls, binding_node) {
4559 + /* Prepare next buffer: */
4560 + if (!skb) {
4561 + skb = named_prepare_buf(net, PUBLICATION, msg_rem,
4562 +diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
4563 +index 9fab8e5a4a5b..994ddc7ec9b1 100644
4564 +--- a/net/tls/tls_sw.c
4565 ++++ b/net/tls/tls_sw.c
4566 +@@ -286,7 +286,7 @@ static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
4567 + int length, int *pages_used,
4568 + unsigned int *size_used,
4569 + struct scatterlist *to, int to_max_pages,
4570 +- bool charge, bool revert)
4571 ++ bool charge)
4572 + {
4573 + struct page *pages[MAX_SKB_FRAGS];
4574 +
4575 +@@ -335,10 +335,10 @@ static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
4576 + }
4577 +
4578 + out:
4579 ++ if (rc)
4580 ++ iov_iter_revert(from, size - *size_used);
4581 + *size_used = size;
4582 + *pages_used = num_elem;
4583 +- if (revert)
4584 +- iov_iter_revert(from, size);
4585 +
4586 + return rc;
4587 + }
4588 +@@ -440,7 +440,7 @@ alloc_encrypted:
4589 + &ctx->sg_plaintext_size,
4590 + ctx->sg_plaintext_data,
4591 + ARRAY_SIZE(ctx->sg_plaintext_data),
4592 +- true, false);
4593 ++ true);
4594 + if (ret)
4595 + goto fallback_to_reg_send;
4596 +
4597 +@@ -453,8 +453,6 @@ alloc_encrypted:
4598 +
4599 + copied -= try_to_copy;
4600 + fallback_to_reg_send:
4601 +- iov_iter_revert(&msg->msg_iter,
4602 +- ctx->sg_plaintext_size - orig_size);
4603 + trim_sg(sk, ctx->sg_plaintext_data,
4604 + &ctx->sg_plaintext_num_elem,
4605 + &ctx->sg_plaintext_size,
4606 +@@ -828,7 +826,7 @@ int tls_sw_recvmsg(struct sock *sk,
4607 + err = zerocopy_from_iter(sk, &msg->msg_iter,
4608 + to_copy, &pages,
4609 + &chunk, &sgin[1],
4610 +- MAX_SKB_FRAGS, false, true);
4611 ++ MAX_SKB_FRAGS, false);
4612 + if (err < 0)
4613 + goto fallback_to_reg_recv;
4614 +
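
The tls_sw.c hunks drop the 'revert' flag and the caller-side iov_iter_revert() call: zerocopy_from_iter() now rewinds the iterator itself whenever it fails, so every caller sees the iterator untouched on error. The same "consume fully or rewind, never half-consume" contract in a small sketch (hypothetical cursor type):

    #include <stdio.h>

    struct cursor {
        const char *buf;
        size_t len, pos;
    };

    /* Copy out n bytes; on failure, rewind whatever was consumed so the
     * caller never sees a partially advanced cursor. */
    static int take(struct cursor *c, char *dst, size_t n)
    {
        size_t start = c->pos;

        for (size_t i = 0; i < n; i++) {
            if (c->pos == c->len) {
                c->pos = start; /* revert on error, in one place */
                return -1;
            }
            dst[i] = c->buf[c->pos++];
        }
        return 0;
    }

    int main(void)
    {
        struct cursor c = { .buf = "abc", .len = 3 };
        char out[8];

        printf("take 5: %d pos=%zu\n", take(&c, out, 5), c.pos); /* -1, 0 */
        printf("take 3: %d pos=%zu\n", take(&c, out, 3), c.pos); /*  0, 3 */
        return 0;
    }
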
4615 +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
4616 +index 733ccf867972..214f9ef79a64 100644
4617 +--- a/net/wireless/nl80211.c
4618 ++++ b/net/wireless/nl80211.c
4619 +@@ -3699,6 +3699,7 @@ static bool ht_rateset_to_mask(struct ieee80211_supported_band *sband,
4620 + return false;
4621 +
4622 + /* check availability */
4623 ++ ridx = array_index_nospec(ridx, IEEE80211_HT_MCS_MASK_LEN);
4624 + if (sband->ht_cap.mcs.rx_mask[ridx] & rbit)
4625 + mcs[ridx] |= rbit;
4626 + else
4627 +@@ -10124,7 +10125,7 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
4628 + struct wireless_dev *wdev = dev->ieee80211_ptr;
4629 + s32 last, low, high;
4630 + u32 hyst;
4631 +- int i, n;
4632 ++ int i, n, low_index;
4633 + int err;
4634 +
4635 + /* RSSI reporting disabled? */
4636 +@@ -10161,10 +10162,19 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
4637 + if (last < wdev->cqm_config->rssi_thresholds[i])
4638 + break;
4639 +
4640 +- low = i > 0 ?
4641 +- (wdev->cqm_config->rssi_thresholds[i - 1] - hyst) : S32_MIN;
4642 +- high = i < n ?
4643 +- (wdev->cqm_config->rssi_thresholds[i] + hyst - 1) : S32_MAX;
4644 ++ low_index = i - 1;
4645 ++ if (low_index >= 0) {
4646 ++ low_index = array_index_nospec(low_index, n);
4647 ++ low = wdev->cqm_config->rssi_thresholds[low_index] - hyst;
4648 ++ } else {
4649 ++ low = S32_MIN;
4650 ++ }
4651 ++ if (i < n) {
4652 ++ i = array_index_nospec(i, n);
4653 ++ high = wdev->cqm_config->rssi_thresholds[i] + hyst - 1;
4654 ++ } else {
4655 ++ high = S32_MAX;
4656 ++ }
4657 +
4658 + return rdev_set_cqm_rssi_range_config(rdev, dev, low, high);
4659 + }
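
Both nl80211 hunks above clamp a user-influenced index with array_index_nospec() before it addresses an array, so a mispredicted bounds check cannot speculatively read out of bounds. The kernel's generic fallback is essentially a branchless mask; a userspace rendition (relies, like the kernel helper, on arithmetic right shift of signed values):

    #include <limits.h>
    #include <stdio.h>

    #define BITS (sizeof(long) * CHAR_BIT)

    /* ~0UL when 0 <= index < size, 0UL otherwise, computed without a
     * conditional branch the CPU could mispredict. */
    static unsigned long index_mask(unsigned long index, unsigned long size)
    {
        return ~(long)(index | (size - 1UL - index)) >> (BITS - 1);
    }

    static unsigned long clamp_index(unsigned long index, unsigned long size)
    {
        return index & index_mask(index, size);
    }

    int main(void)
    {
        int table[4] = { 10, 20, 30, 40 };
        unsigned long i = 2, j = 9;     /* j is out of range */

        printf("%d\n", table[clamp_index(i, 4)]);   /* 30 */
        printf("%lu\n", clamp_index(j, 4));         /* 0: forced in-bounds */
        return 0;
    }
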
4660 +diff --git a/net/wireless/reg.c b/net/wireless/reg.c
4661 +index 2f702adf2912..24cfa2776f50 100644
4662 +--- a/net/wireless/reg.c
4663 ++++ b/net/wireless/reg.c
4664 +@@ -2661,11 +2661,12 @@ static void reg_process_hint(struct regulatory_request *reg_request)
4665 + {
4666 + struct wiphy *wiphy = NULL;
4667 + enum reg_request_treatment treatment;
4668 ++ enum nl80211_reg_initiator initiator = reg_request->initiator;
4669 +
4670 + if (reg_request->wiphy_idx != WIPHY_IDX_INVALID)
4671 + wiphy = wiphy_idx_to_wiphy(reg_request->wiphy_idx);
4672 +
4673 +- switch (reg_request->initiator) {
4674 ++ switch (initiator) {
4675 + case NL80211_REGDOM_SET_BY_CORE:
4676 + treatment = reg_process_hint_core(reg_request);
4677 + break;
4678 +@@ -2683,7 +2684,7 @@ static void reg_process_hint(struct regulatory_request *reg_request)
4679 + treatment = reg_process_hint_country_ie(wiphy, reg_request);
4680 + break;
4681 + default:
4682 +- WARN(1, "invalid initiator %d\n", reg_request->initiator);
4683 ++ WARN(1, "invalid initiator %d\n", initiator);
4684 + goto out_free;
4685 + }
4686 +
4687 +@@ -2698,7 +2699,7 @@ static void reg_process_hint(struct regulatory_request *reg_request)
4688 + */
4689 + if (treatment == REG_REQ_ALREADY_SET && wiphy &&
4690 + wiphy->regulatory_flags & REGULATORY_STRICT_REG) {
4691 +- wiphy_update_regulatory(wiphy, reg_request->initiator);
4692 ++ wiphy_update_regulatory(wiphy, initiator);
4693 + wiphy_all_share_dfs_chan_state(wiphy);
4694 + reg_check_channels();
4695 + }
4696 +@@ -2867,6 +2868,7 @@ static int regulatory_hint_core(const char *alpha2)
4697 + request->alpha2[0] = alpha2[0];
4698 + request->alpha2[1] = alpha2[1];
4699 + request->initiator = NL80211_REGDOM_SET_BY_CORE;
4700 ++ request->wiphy_idx = WIPHY_IDX_INVALID;
4701 +
4702 + queue_regulatory_request(request);
4703 +
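
The reg.c change reads reg_request->initiator into a local exactly once: the reg_process_hint_*() helpers can free reg_request, so the later uses in the WARN() and the REG_REQ_ALREADY_SET path were reads of freed memory. It also initializes wiphy_idx for core hints so a stale index cannot resolve to a wiphy. The snapshot-before-consuming-call pattern in isolation (hypothetical names):

    #include <stdio.h>
    #include <stdlib.h>

    struct request { int kind; };

    /* Consumes (frees) the request. */
    static int process(struct request *req)
    {
        int rc = req->kind * 10;

        free(req);
        return rc;
    }

    int main(void)
    {
        struct request *req = malloc(sizeof(*req));

        req->kind = 3;
        int kind = req->kind;   /* snapshot before the consuming call */
        int rc = process(req);

        /* use the snapshot, never req->kind, from here on */
        printf("kind=%d rc=%d\n", kind, rc);
        return 0;
    }
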
4704 +diff --git a/net/wireless/scan.c b/net/wireless/scan.c
4705 +index d36c3eb7b931..d0e7472dd9fd 100644
4706 +--- a/net/wireless/scan.c
4707 ++++ b/net/wireless/scan.c
4708 +@@ -1058,13 +1058,23 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
4709 + return NULL;
4710 + }
4711 +
4712 ++/*
4713 ++ * Update RX channel information based on the available frame payload
4714 ++ * information. This is mainly for the 2.4 GHz band where frames can be received
4715 ++ * from neighboring channels and the Beacon frames use the DSSS Parameter Set
4716 ++ * element to indicate the current (transmitting) channel, but this might also
4717 ++ * be needed on other bands if RX frequency does not match with the actual
4718 ++ * operating channel of a BSS.
4719 ++ */
4720 + static struct ieee80211_channel *
4721 + cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
4722 +- struct ieee80211_channel *channel)
4723 ++ struct ieee80211_channel *channel,
4724 ++ enum nl80211_bss_scan_width scan_width)
4725 + {
4726 + const u8 *tmp;
4727 + u32 freq;
4728 + int channel_number = -1;
4729 ++ struct ieee80211_channel *alt_channel;
4730 +
4731 + tmp = cfg80211_find_ie(WLAN_EID_DS_PARAMS, ie, ielen);
4732 + if (tmp && tmp[1] == 1) {
4733 +@@ -1078,16 +1088,45 @@ cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
4734 + }
4735 + }
4736 +
4737 +- if (channel_number < 0)
4738 ++ if (channel_number < 0) {
4739 ++ /* No channel information in frame payload */
4740 + return channel;
4741 ++ }
4742 +
4743 + freq = ieee80211_channel_to_frequency(channel_number, channel->band);
4744 +- channel = ieee80211_get_channel(wiphy, freq);
4745 +- if (!channel)
4746 +- return NULL;
4747 +- if (channel->flags & IEEE80211_CHAN_DISABLED)
4748 ++ alt_channel = ieee80211_get_channel(wiphy, freq);
4749 ++ if (!alt_channel) {
4750 ++ if (channel->band == NL80211_BAND_2GHZ) {
4751 ++ /*
4752 ++ * Better not allow unexpected channels when that could
4753 ++ * be going beyond the 1-11 range (e.g., discovering
4754 ++ * BSS on channel 12 when radio is configured for
4755 ++ * channel 11.
4756 ++ */
4757 ++ return NULL;
4758 ++ }
4759 ++
4760 ++ /* No match for the payload channel number - ignore it */
4761 ++ return channel;
4762 ++ }
4763 ++
4764 ++ if (scan_width == NL80211_BSS_CHAN_WIDTH_10 ||
4765 ++ scan_width == NL80211_BSS_CHAN_WIDTH_5) {
4766 ++ /*
4767 ++ * Ignore channel number in 5 and 10 MHz channels where there
4768 ++ * may not be an n:1 or 1:n mapping between frequencies and
4769 ++ * channel numbers.
4770 ++ */
4771 ++ return channel;
4772 ++ }
4773 ++
4774 ++ /*
4775 ++ * Use the channel determined through the payload channel number
4776 ++ * instead of the RX channel reported by the driver.
4777 ++ */
4778 ++ if (alt_channel->flags & IEEE80211_CHAN_DISABLED)
4779 + return NULL;
4780 +- return channel;
4781 ++ return alt_channel;
4782 + }
4783 +
4784 + /* Returned bss is reference counted and must be cleaned up appropriately. */
4785 +@@ -1112,7 +1151,8 @@ cfg80211_inform_bss_data(struct wiphy *wiphy,
4786 + (data->signal < 0 || data->signal > 100)))
4787 + return NULL;
4788 +
4789 +- channel = cfg80211_get_bss_channel(wiphy, ie, ielen, data->chan);
4790 ++ channel = cfg80211_get_bss_channel(wiphy, ie, ielen, data->chan,
4791 ++ data->scan_width);
4792 + if (!channel)
4793 + return NULL;
4794 +
4795 +@@ -1210,7 +1250,7 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
4796 + return NULL;
4797 +
4798 + channel = cfg80211_get_bss_channel(wiphy, mgmt->u.beacon.variable,
4799 +- ielen, data->chan);
4800 ++ ielen, data->chan, data->scan_width);
4801 + if (!channel)
4802 + return NULL;
4803 +
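
cfg80211_get_bss_channel() now keeps both candidates: the driver-reported RX channel and alt_channel derived from the DSSS Parameter Set channel number via ieee80211_channel_to_frequency(). For the 2.4 GHz band that mapping is simple arithmetic; a self-contained version covering only that band (a simplification of the kernel function, which handles all bands):

    #include <stdio.h>

    /* 2.4 GHz: channels 1-13 sit at 2407 + 5*chan MHz; channel 14 is
     * the Japan-only outlier at 2484 MHz. */
    static int chan_to_freq_2ghz(int chan)
    {
        if (chan == 14)
            return 2484;
        if (chan >= 1 && chan <= 13)
            return 2407 + chan * 5;
        return -1;  /* invalid channel number */
    }

    int main(void)
    {
        for (int c = 1; c <= 14; c++)
            printf("chan %2d -> %d MHz\n", c, chan_to_freq_2ghz(c));
        return 0;
    }
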
4804 +diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
4805 +index 352abca2605f..86f5afbd0a0c 100644
4806 +--- a/net/xfrm/xfrm_input.c
4807 ++++ b/net/xfrm/xfrm_input.c
4808 +@@ -453,6 +453,7 @@ resume:
4809 + XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
4810 + goto drop;
4811 + }
4812 ++ crypto_done = false;
4813 + } while (!err);
4814 +
4815 + err = xfrm_rcv_cb(skb, family, x->type->proto, 0);
4816 +diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
4817 +index 89b178a78dc7..36d15a38ce5e 100644
4818 +--- a/net/xfrm/xfrm_output.c
4819 ++++ b/net/xfrm/xfrm_output.c
4820 +@@ -101,6 +101,10 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
4821 + spin_unlock_bh(&x->lock);
4822 +
4823 + skb_dst_force(skb);
4824 ++ if (!skb_dst(skb)) {
4825 ++ XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
4826 ++ goto error_nolock;
4827 ++ }
4828 +
4829 + if (xfrm_offload(skb)) {
4830 + x->type_offload->encap(x, skb);
4831 +diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
4832 +index a94983e03a8b..526e6814ed4b 100644
4833 +--- a/net/xfrm/xfrm_policy.c
4834 ++++ b/net/xfrm/xfrm_policy.c
4835 +@@ -2551,6 +2551,10 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
4836 + }
4837 +
4838 + skb_dst_force(skb);
4839 ++ if (!skb_dst(skb)) {
4840 ++ XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
4841 ++ return 0;
4842 ++ }
4843 +
4844 + dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
4845 + if (IS_ERR(dst)) {
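
The xfrm_output.c and xfrm_policy.c hunks add the same guard: skb_dst_force() no longer guarantees the skb keeps a refcounted dst, so the pointer must be rechecked before it is dereferenced, bumping an error counter on failure. The general shape of "re-check after a fallible pin" in a contrived sketch (hypothetical names, not the skb API):

    #include <stddef.h>
    #include <stdio.h>

    struct dst { int refcnt; };

    struct packet { struct dst *dst; };

    /* Try to take a reference; on failure, detach the dst entirely --
     * analogous to skb_dst_force() after it stopped being infallible. */
    static void dst_force(struct packet *p)
    {
        if (p->dst && p->dst->refcnt <= 0)
            p->dst = NULL;
        else if (p->dst)
            p->dst->refcnt++;
    }

    int main(void)
    {
        struct dst dead = { .refcnt = 0 };
        struct packet p = { .dst = &dead };

        dst_force(&p);
        if (!p.dst) {   /* the added check: bail out and count an error */
            printf("no route attached, dropping\n");
            return 0;
        }
        printf("refcnt=%d\n", p.dst->refcnt);
        return 0;
    }
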
4846 +diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
4847 +index 33878e6e0d0a..d0672c400c2f 100644
4848 +--- a/net/xfrm/xfrm_user.c
4849 ++++ b/net/xfrm/xfrm_user.c
4850 +@@ -151,10 +151,16 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
4851 + err = -EINVAL;
4852 + switch (p->family) {
4853 + case AF_INET:
4854 ++ if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32)
4855 ++ goto out;
4856 ++
4857 + break;
4858 +
4859 + case AF_INET6:
4860 + #if IS_ENABLED(CONFIG_IPV6)
4861 ++ if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128)
4862 ++ goto out;
4863 ++
4864 + break;
4865 + #else
4866 + err = -EAFNOSUPPORT;
4867 +@@ -1359,10 +1365,16 @@ static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
4868 +
4869 + switch (p->sel.family) {
4870 + case AF_INET:
4871 ++ if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32)
4872 ++ return -EINVAL;
4873 ++
4874 + break;
4875 +
4876 + case AF_INET6:
4877 + #if IS_ENABLED(CONFIG_IPV6)
4878 ++ if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128)
4879 ++ return -EINVAL;
4880 ++
4881 + break;
4882 + #else
4883 + return -EAFNOSUPPORT;
4884 +@@ -1443,6 +1455,9 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
4885 + (ut[i].family != prev_family))
4886 + return -EINVAL;
4887 +
4888 ++ if (ut[i].mode >= XFRM_MODE_MAX)
4889 ++ return -EINVAL;
4890 ++
4891 + prev_family = ut[i].family;
4892 +
4893 + switch (ut[i].family) {
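
The xfrm_user.c hunks reject selector prefix lengths beyond the address width (32 for IPv4, 128 for IPv6) in both verify_newsa_info() and verify_newpolicy_info(); oversized prefixes later feed mask computations where they can trigger undefined shifts. The check in isolation (local stand-in constants, not the uapi values):

    #include <stdio.h>

    enum { MY_AF_INET = 2, MY_AF_INET6 = 10 }; /* local stand-ins */

    static int prefixlen_ok(int family, unsigned int plen_s,
                            unsigned int plen_d)
    {
        unsigned int max;

        switch (family) {
        case MY_AF_INET:  max = 32;  break;
        case MY_AF_INET6: max = 128; break;
        default: return 0;
        }
        return plen_s <= max && plen_d <= max;
    }

    int main(void)
    {
        printf("%d\n", prefixlen_ok(MY_AF_INET, 24, 32));   /* 1 */
        printf("%d\n", prefixlen_ok(MY_AF_INET, 33, 0));    /* 0 */
        printf("%d\n", prefixlen_ok(MY_AF_INET6, 64, 128)); /* 1 */
        return 0;
    }
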
4894 +diff --git a/tools/perf/Makefile b/tools/perf/Makefile
4895 +index 225454416ed5..7902a5681fc8 100644
4896 +--- a/tools/perf/Makefile
4897 ++++ b/tools/perf/Makefile
4898 +@@ -84,10 +84,10 @@ endif # has_clean
4899 + endif # MAKECMDGOALS
4900 +
4901 + #
4902 +-# The clean target is not really parallel, don't print the jobs info:
4903 ++# Explicitly disable parallelism for the clean target.
4904 + #
4905 + clean:
4906 +- $(make)
4907 ++ $(make) -j1
4908 +
4909 + #
4910 + # The build-test target is not really parallel, don't print the jobs info,
4911 +diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
4912 +index 22dbb6612b41..b70cce40ca97 100644
4913 +--- a/tools/perf/util/machine.c
4914 ++++ b/tools/perf/util/machine.c
4915 +@@ -2246,7 +2246,8 @@ static int append_inlines(struct callchain_cursor *cursor,
4916 + if (!symbol_conf.inline_name || !map || !sym)
4917 + return ret;
4918 +
4919 +- addr = map__rip_2objdump(map, ip);
4920 ++ addr = map__map_ip(map, ip);
4921 ++ addr = map__rip_2objdump(map, addr);
4922 +
4923 + inline_node = inlines__tree_find(&map->dso->inlined_nodes, addr);
4924 + if (!inline_node) {
4925 +@@ -2272,7 +2273,7 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
4926 + {
4927 + struct callchain_cursor *cursor = arg;
4928 + const char *srcline = NULL;
4929 +- u64 addr;
4930 ++ u64 addr = entry->ip;
4931 +
4932 + if (symbol_conf.hide_unresolved && entry->sym == NULL)
4933 + return 0;
4934 +@@ -2284,7 +2285,8 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
4935 + * Convert entry->ip from a virtual address to an offset in
4936 + * its corresponding binary.
4937 + */
4938 +- addr = map__map_ip(entry->map, entry->ip);
4939 ++ if (entry->map)
4940 ++ addr = map__map_ip(entry->map, entry->ip);
4941 +
4942 + srcline = callchain_srcline(entry->map, entry->sym, addr);
4943 + return callchain_cursor_append(cursor, entry->ip,
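
The machine.c fix makes append_inlines() translate the sample IP in two steps, map-relative first (map__map_ip) and only then to the objdump address (map__rip_2objdump), and makes unwind_entry() tolerate entries with no map. The core of such translations is mapping-base subtraction plus the file offset; a hedged, heavily simplified sketch (real perf maps carry more state, e.g. relocation offsets):

    #include <stdio.h>

    struct map {
        unsigned long start;    /* runtime base of the mapping */
        unsigned long pgoff;    /* file offset the mapping starts at */
    };

    /* Simplified: runtime virtual address -> offset within the file. */
    static unsigned long map_ip_to_file(const struct map *m, unsigned long ip)
    {
        return ip - m->start + m->pgoff;
    }

    int main(void)
    {
        struct map m = { .start = 0x7f0000001000UL, .pgoff = 0x2000UL };
        unsigned long ip = 0x7f0000001abcUL;

        printf("file offset: %#lx\n", map_ip_to_file(&m, ip)); /* 0x2abc */
        return 0;
    }
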
4944 +diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py
4945 +index 001be4f9d3b9..a5f9e236cc71 100644
4946 +--- a/tools/perf/util/setup.py
4947 ++++ b/tools/perf/util/setup.py
4948 +@@ -27,7 +27,7 @@ class install_lib(_install_lib):
4949 +
4950 + cflags = getenv('CFLAGS', '').split()
4951 + # switch off several checks (need to be at the end of cflags list)
4952 +-cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter' ]
4953 ++cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter', '-Wno-redundant-decls' ]
4954 + if cc != "clang":
4955 + cflags += ['-Wno-cast-function-type' ]
4956 +
4957 +diff --git a/tools/testing/selftests/net/fib-onlink-tests.sh b/tools/testing/selftests/net/fib-onlink-tests.sh
4958 +index 3991ad1a368d..864f865eee55 100755
4959 +--- a/tools/testing/selftests/net/fib-onlink-tests.sh
4960 ++++ b/tools/testing/selftests/net/fib-onlink-tests.sh
4961 +@@ -167,8 +167,8 @@ setup()
4962 + # add vrf table
4963 + ip li add ${VRF} type vrf table ${VRF_TABLE}
4964 + ip li set ${VRF} up
4965 +- ip ro add table ${VRF_TABLE} unreachable default
4966 +- ip -6 ro add table ${VRF_TABLE} unreachable default
4967 ++ ip ro add table ${VRF_TABLE} unreachable default metric 8192
4968 ++ ip -6 ro add table ${VRF_TABLE} unreachable default metric 8192
4969 +
4970 + # create test interfaces
4971 + ip li add ${NETIFS[p1]} type veth peer name ${NETIFS[p2]}
4972 +@@ -185,20 +185,20 @@ setup()
4973 + for n in 1 3 5 7; do
4974 + ip li set ${NETIFS[p${n}]} up
4975 + ip addr add ${V4ADDRS[p${n}]}/24 dev ${NETIFS[p${n}]}
4976 +- ip addr add ${V6ADDRS[p${n}]}/64 dev ${NETIFS[p${n}]}
4977 ++ ip addr add ${V6ADDRS[p${n}]}/64 dev ${NETIFS[p${n}]} nodad
4978 + done
4979 +
4980 + # move peer interfaces to namespace and add addresses
4981 + for n in 2 4 6 8; do
4982 + ip li set ${NETIFS[p${n}]} netns ${PEER_NS} up
4983 + ip -netns ${PEER_NS} addr add ${V4ADDRS[p${n}]}/24 dev ${NETIFS[p${n}]}
4984 +- ip -netns ${PEER_NS} addr add ${V6ADDRS[p${n}]}/64 dev ${NETIFS[p${n}]}
4985 ++ ip -netns ${PEER_NS} addr add ${V6ADDRS[p${n}]}/64 dev ${NETIFS[p${n}]} nodad
4986 + done
4987 +
4988 +- set +e
4989 ++ ip -6 ro add default via ${V6ADDRS[p3]/::[0-9]/::64}
4990 ++ ip -6 ro add table ${VRF_TABLE} default via ${V6ADDRS[p7]/::[0-9]/::64}
4991 +
4992 +- # let DAD complete - assume default of 1 probe
4993 +- sleep 1
4994 ++ set +e
4995 + }
4996 +
4997 + cleanup()
4998 +diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh
4999 +index 0d7a44fa30af..8e509cbcb209 100755
5000 +--- a/tools/testing/selftests/net/rtnetlink.sh
5001 ++++ b/tools/testing/selftests/net/rtnetlink.sh
5002 +@@ -1,4 +1,4 @@
5003 +-#!/bin/sh
5004 ++#!/bin/bash
5005 + #
5006 + # This test is for checking rtnetlink callpaths, and get as much coverage as possible.
5007 + #
5008 +diff --git a/tools/testing/selftests/net/udpgso_bench.sh b/tools/testing/selftests/net/udpgso_bench.sh
5009 +index 850767befa47..99e537ab5ad9 100755
5010 +--- a/tools/testing/selftests/net/udpgso_bench.sh
5011 ++++ b/tools/testing/selftests/net/udpgso_bench.sh
5012 +@@ -1,4 +1,4 @@
5013 +-#!/bin/sh
5014 ++#!/bin/bash
5015 + # SPDX-License-Identifier: GPL-2.0
5016 + #
5017 + # Run a series of udpgso benchmarks