Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.18 commit in: /
Date: Wed, 14 Nov 2018 13:16:08
Message-Id: 1542201340.171b3b7ec507044ac98ac85e3bdecb9ea3c96432.mpagano@gentoo
1 commit: 171b3b7ec507044ac98ac85e3bdecb9ea3c96432
2 Author: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
3 AuthorDate: Sun Nov 4 17:33:00 2018 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Nov 14 13:15:40 2018 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=171b3b7e
7
8 Linux kernel 4.18.17
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1016_linux-4.18.17.patch | 4982 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 4986 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 52e9ca9..fcd301e 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -107,6 +107,10 @@ Patch: 1015_linux-4.18.16.patch
21 From: http://www.kernel.org
22 Desc: Linux 4.18.16
23
24 +Patch: 1016_linux-4.18.17.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 4.18.17
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1016_linux-4.18.17.patch b/1016_linux-4.18.17.patch
33 new file mode 100644
34 index 0000000..1e385a1
35 --- /dev/null
36 +++ b/1016_linux-4.18.17.patch
37 @@ -0,0 +1,4982 @@
38 +diff --git a/Makefile b/Makefile
39 +index 034dd990b0ae..c051db0ca5a0 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 4
45 + PATCHLEVEL = 18
46 +-SUBLEVEL = 16
47 ++SUBLEVEL = 17
48 + EXTRAVERSION =
49 + NAME = Merciless Moray
50 +
51 +diff --git a/arch/Kconfig b/arch/Kconfig
52 +index f03b72644902..a18371a36e03 100644
53 +--- a/arch/Kconfig
54 ++++ b/arch/Kconfig
55 +@@ -977,4 +977,12 @@ config REFCOUNT_FULL
56 + against various use-after-free conditions that can be used in
57 + security flaw exploits.
58 +
59 ++config HAVE_ARCH_COMPILER_H
60 ++ bool
61 ++ help
62 ++ An architecture can select this if it provides an
63 ++ asm/compiler.h header that should be included after
64 ++ linux/compiler-*.h in order to override macro definitions that those
65 ++ headers generally provide.
66 ++
67 + source "kernel/gcov/Kconfig"
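
The new HAVE_ARCH_COMPILER_H symbol exists purely for include ordering, as the help text above says. A minimal sketch of the pattern it enables (illustrative only, not the kernel's actual wiring; the generic header name is one example of the linux/compiler-*.h family):

/* Generic compiler headers are pulled in first; when the arch opts in,
 * its own header gets the last word on any macro it needs to override. */
#include <linux/compiler-gcc.h>      /* generic macro definitions */

#ifdef CONFIG_HAVE_ARCH_COMPILER_H
#include <asm/compiler.h>            /* may re-define what the above set up */
#endif
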
68 +diff --git a/arch/arm/boot/dts/bcm63138.dtsi b/arch/arm/boot/dts/bcm63138.dtsi
69 +index 43ee992ccdcf..6df61518776f 100644
70 +--- a/arch/arm/boot/dts/bcm63138.dtsi
71 ++++ b/arch/arm/boot/dts/bcm63138.dtsi
72 +@@ -106,21 +106,23 @@
73 + global_timer: timer@1e200 {
74 + compatible = "arm,cortex-a9-global-timer";
75 + reg = <0x1e200 0x20>;
76 +- interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
77 ++ interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
78 + clocks = <&axi_clk>;
79 + };
80 +
81 + local_timer: local-timer@1e600 {
82 + compatible = "arm,cortex-a9-twd-timer";
83 + reg = <0x1e600 0x20>;
84 +- interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
85 ++ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) |
86 ++ IRQ_TYPE_EDGE_RISING)>;
87 + clocks = <&axi_clk>;
88 + };
89 +
90 + twd_watchdog: watchdog@1e620 {
91 + compatible = "arm,cortex-a9-twd-wdt";
92 + reg = <0x1e620 0x20>;
93 +- interrupts = <GIC_PPI 14 IRQ_TYPE_LEVEL_HIGH>;
94 ++ interrupts = <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) |
95 ++ IRQ_TYPE_LEVEL_HIGH)>;
96 + };
97 +
98 + armpll: armpll {
99 +@@ -158,7 +160,7 @@
100 + serial0: serial@600 {
101 + compatible = "brcm,bcm6345-uart";
102 + reg = <0x600 0x1b>;
103 +- interrupts = <GIC_SPI 32 0>;
104 ++ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
105 + clocks = <&periph_clk>;
106 + clock-names = "periph";
107 + status = "disabled";
108 +@@ -167,7 +169,7 @@
109 + serial1: serial@620 {
110 + compatible = "brcm,bcm6345-uart";
111 + reg = <0x620 0x1b>;
112 +- interrupts = <GIC_SPI 33 0>;
113 ++ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
114 + clocks = <&periph_clk>;
115 + clock-names = "periph";
116 + status = "disabled";
117 +@@ -180,7 +182,7 @@
118 + reg = <0x2000 0x600>, <0xf0 0x10>;
119 + reg-names = "nand", "nand-int-base";
120 + status = "disabled";
121 +- interrupts = <GIC_SPI 38 0>;
122 ++ interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
123 + interrupt-names = "nand";
124 + };
125 +
126 +diff --git a/arch/arm/boot/dts/imx53-qsb-common.dtsi b/arch/arm/boot/dts/imx53-qsb-common.dtsi
127 +index ef7658a78836..c1548adee789 100644
128 +--- a/arch/arm/boot/dts/imx53-qsb-common.dtsi
129 ++++ b/arch/arm/boot/dts/imx53-qsb-common.dtsi
130 +@@ -123,6 +123,17 @@
131 + };
132 + };
133 +
134 ++&cpu0 {
135 ++ /* CPU rated to 1GHz, not 1.2GHz as per the default settings */
136 ++ operating-points = <
137 ++ /* kHz uV */
138 ++ 166666 850000
139 ++ 400000 900000
140 ++ 800000 1050000
141 ++ 1000000 1200000
142 ++ >;
143 ++};
144 ++
145 + &esdhc1 {
146 + pinctrl-names = "default";
147 + pinctrl-0 = <&pinctrl_esdhc1>;
148 +diff --git a/arch/arm/kernel/vmlinux.lds.h b/arch/arm/kernel/vmlinux.lds.h
149 +index ae5fdff18406..8247bc15addc 100644
150 +--- a/arch/arm/kernel/vmlinux.lds.h
151 ++++ b/arch/arm/kernel/vmlinux.lds.h
152 +@@ -49,6 +49,8 @@
153 + #define ARM_DISCARD \
154 + *(.ARM.exidx.exit.text) \
155 + *(.ARM.extab.exit.text) \
156 ++ *(.ARM.exidx.text.exit) \
157 ++ *(.ARM.extab.text.exit) \
158 + ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text)) \
159 + ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text)) \
160 + ARM_EXIT_DISCARD(EXIT_TEXT) \
161 +diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
162 +index fc91205ff46c..5bf9443cfbaa 100644
163 +--- a/arch/arm/mm/ioremap.c
164 ++++ b/arch/arm/mm/ioremap.c
165 +@@ -473,7 +473,7 @@ void pci_ioremap_set_mem_type(int mem_type)
166 +
167 + int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
168 + {
169 +- BUG_ON(offset + SZ_64K > IO_SPACE_LIMIT);
170 ++ BUG_ON(offset + SZ_64K - 1 > IO_SPACE_LIMIT);
171 +
172 + return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
173 + PCI_IO_VIRT_BASE + offset + SZ_64K,
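
The ioremap change above is an inclusive-bound fix: a 64 KiB window starting at offset occupies bytes offset through offset + SZ_64K - 1, so the old BUG_ON rejected a window whose last byte landed exactly on IO_SPACE_LIMIT. A standalone demonstration of the arithmetic (the limit value below is a stand-in, not the ARM one):

#include <assert.h>

#define SZ_64K         0x10000UL
#define IO_SPACE_LIMIT 0xfffffUL   /* stand-in: last valid I/O-space byte */

int main(void)
{
        unsigned long offset = IO_SPACE_LIMIT + 1 - SZ_64K; /* last legal window */

        /* Old check: fires on this perfectly valid mapping. */
        assert(offset + SZ_64K > IO_SPACE_LIMIT);

        /* Fixed check: tests the window's last byte, which still fits. */
        assert(!(offset + SZ_64K - 1 > IO_SPACE_LIMIT));
        return 0;
}
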
174 +diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
175 +index 192b3ba07075..f85be2f8b140 100644
176 +--- a/arch/arm64/mm/hugetlbpage.c
177 ++++ b/arch/arm64/mm/hugetlbpage.c
178 +@@ -117,11 +117,14 @@ static pte_t get_clear_flush(struct mm_struct *mm,
179 +
180 + /*
181 + * If HW_AFDBM is enabled, then the HW could turn on
182 +- * the dirty bit for any page in the set, so check
183 +- * them all. All hugetlb entries are already young.
184 ++ * the dirty or accessed bit for any page in the set,
185 ++ * so check them all.
186 + */
187 + if (pte_dirty(pte))
188 + orig_pte = pte_mkdirty(orig_pte);
189 ++
190 ++ if (pte_young(pte))
191 ++ orig_pte = pte_mkyoung(orig_pte);
192 + }
193 +
194 + if (valid) {
195 +@@ -340,10 +343,13 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
196 + if (!pte_same(orig_pte, pte))
197 + changed = 1;
198 +
199 +- /* Make sure we don't lose the dirty state */
200 ++ /* Make sure we don't lose the dirty or young state */
201 + if (pte_dirty(orig_pte))
202 + pte = pte_mkdirty(pte);
203 +
204 ++ if (pte_young(orig_pte))
205 ++ pte = pte_mkyoung(pte);
206 ++
207 + hugeprot = pte_pgprot(pte);
208 + for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
209 + set_pte_at(vma->vm_mm, addr, ptep, pfn_pte(pfn, hugeprot));
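
With HW_AFDBM, hardware may set the dirty or accessed bit on any one of the PTEs backing a contiguous hugetlb mapping, so both bits have to be OR-folded into the summary entry; before this release the accessed (young) bit was dropped. A userspace model of the fold (types and names are stand-ins, not the arm64 code):

#include <assert.h>
#include <stdbool.h>

struct pte { bool dirty; bool young; };

/* OR the hardware-managed bits of every constituent PTE into a summary. */
static struct pte fold_contig(const struct pte *ptes, int ncontig)
{
        struct pte orig = { false, false };

        for (int i = 0; i < ncontig; i++) {
                if (ptes[i].dirty)
                        orig.dirty = true;
                if (ptes[i].young)      /* the bit 4.18.17 stops losing */
                        orig.young = true;
        }
        return orig;
}

int main(void)
{
        struct pte set[4] = { {0, 0}, {0, 1}, {1, 0}, {0, 0} };
        struct pte sum = fold_contig(set, 4);

        assert(sum.dirty && sum.young);
        return 0;
}
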
210 +diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
211 +index 59d07bd5374a..055b211b7126 100644
212 +--- a/arch/powerpc/mm/numa.c
213 ++++ b/arch/powerpc/mm/numa.c
214 +@@ -1217,9 +1217,10 @@ int find_and_online_cpu_nid(int cpu)
215 + * Need to ensure that NODE_DATA is initialized for a node from
216 + * available memory (see memblock_alloc_try_nid). If unable to
217 + * init the node, then default to nearest node that has memory
218 +- * installed.
219 ++ * installed. Skip onlining a node if the subsystems are not
220 ++ * yet initialized.
221 + */
222 +- if (try_online_node(new_nid))
223 ++ if (!topology_inited || try_online_node(new_nid))
224 + new_nid = first_online_node;
225 + #else
226 + /*
227 +diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
228 +index 0efa5b29d0a3..dcff272aee06 100644
229 +--- a/arch/riscv/kernel/setup.c
230 ++++ b/arch/riscv/kernel/setup.c
231 +@@ -165,7 +165,7 @@ static void __init setup_bootmem(void)
232 + BUG_ON(mem_size == 0);
233 +
234 + set_max_mapnr(PFN_DOWN(mem_size));
235 +- max_low_pfn = pfn_base + PFN_DOWN(mem_size);
236 ++ max_low_pfn = memblock_end_of_DRAM();
237 +
238 + #ifdef CONFIG_BLK_DEV_INITRD
239 + setup_initrd();
240 +diff --git a/arch/sparc/include/asm/cpudata_64.h b/arch/sparc/include/asm/cpudata_64.h
241 +index 666d6b5c0440..9c3fc03abe9a 100644
242 +--- a/arch/sparc/include/asm/cpudata_64.h
243 ++++ b/arch/sparc/include/asm/cpudata_64.h
244 +@@ -28,7 +28,7 @@ typedef struct {
245 + unsigned short sock_id; /* physical package */
246 + unsigned short core_id;
247 + unsigned short max_cache_id; /* groupings of highest shared cache */
248 +- unsigned short proc_id; /* strand (aka HW thread) id */
249 ++ signed short proc_id; /* strand (aka HW thread) id */
250 + } cpuinfo_sparc;
251 +
252 + DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
253 +diff --git a/arch/sparc/include/asm/switch_to_64.h b/arch/sparc/include/asm/switch_to_64.h
254 +index 4ff29b1406a9..b1d4e2e3210f 100644
255 +--- a/arch/sparc/include/asm/switch_to_64.h
256 ++++ b/arch/sparc/include/asm/switch_to_64.h
257 +@@ -67,6 +67,7 @@ do { save_and_clear_fpu(); \
258 + } while(0)
259 +
260 + void synchronize_user_stack(void);
261 +-void fault_in_user_windows(void);
262 ++struct pt_regs;
263 ++void fault_in_user_windows(struct pt_regs *);
264 +
265 + #endif /* __SPARC64_SWITCH_TO_64_H */
266 +diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
267 +index d3149baaa33c..67b3e6b3ce5d 100644
268 +--- a/arch/sparc/kernel/perf_event.c
269 ++++ b/arch/sparc/kernel/perf_event.c
270 +@@ -24,6 +24,7 @@
271 + #include <asm/cpudata.h>
272 + #include <linux/uaccess.h>
273 + #include <linux/atomic.h>
274 ++#include <linux/sched/clock.h>
275 + #include <asm/nmi.h>
276 + #include <asm/pcr.h>
277 + #include <asm/cacheflush.h>
278 +@@ -927,6 +928,8 @@ static void read_in_all_counters(struct cpu_hw_events *cpuc)
279 + sparc_perf_event_update(cp, &cp->hw,
280 + cpuc->current_idx[i]);
281 + cpuc->current_idx[i] = PIC_NO_INDEX;
282 ++ if (cp->hw.state & PERF_HES_STOPPED)
283 ++ cp->hw.state |= PERF_HES_ARCH;
284 + }
285 + }
286 + }
287 +@@ -959,10 +962,12 @@ static void calculate_single_pcr(struct cpu_hw_events *cpuc)
288 +
289 + enc = perf_event_get_enc(cpuc->events[i]);
290 + cpuc->pcr[0] &= ~mask_for_index(idx);
291 +- if (hwc->state & PERF_HES_STOPPED)
292 ++ if (hwc->state & PERF_HES_ARCH) {
293 + cpuc->pcr[0] |= nop_for_index(idx);
294 +- else
295 ++ } else {
296 + cpuc->pcr[0] |= event_encoding(enc, idx);
297 ++ hwc->state = 0;
298 ++ }
299 + }
300 + out:
301 + cpuc->pcr[0] |= cpuc->event[0]->hw.config_base;
302 +@@ -988,6 +993,9 @@ static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
303 +
304 + cpuc->current_idx[i] = idx;
305 +
306 ++ if (cp->hw.state & PERF_HES_ARCH)
307 ++ continue;
308 ++
309 + sparc_pmu_start(cp, PERF_EF_RELOAD);
310 + }
311 + out:
312 +@@ -1079,6 +1087,8 @@ static void sparc_pmu_start(struct perf_event *event, int flags)
313 + event->hw.state = 0;
314 +
315 + sparc_pmu_enable_event(cpuc, &event->hw, idx);
316 ++
317 ++ perf_event_update_userpage(event);
318 + }
319 +
320 + static void sparc_pmu_stop(struct perf_event *event, int flags)
321 +@@ -1371,9 +1381,9 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags)
322 + cpuc->events[n0] = event->hw.event_base;
323 + cpuc->current_idx[n0] = PIC_NO_INDEX;
324 +
325 +- event->hw.state = PERF_HES_UPTODATE;
326 ++ event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
327 + if (!(ef_flags & PERF_EF_START))
328 +- event->hw.state |= PERF_HES_STOPPED;
329 ++ event->hw.state |= PERF_HES_ARCH;
330 +
331 + /*
332 + * If group events scheduling transaction was started,
333 +@@ -1603,6 +1613,8 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
334 + struct perf_sample_data data;
335 + struct cpu_hw_events *cpuc;
336 + struct pt_regs *regs;
337 ++ u64 finish_clock;
338 ++ u64 start_clock;
339 + int i;
340 +
341 + if (!atomic_read(&active_events))
342 +@@ -1616,6 +1628,8 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
343 + return NOTIFY_DONE;
344 + }
345 +
346 ++ start_clock = sched_clock();
347 ++
348 + regs = args->regs;
349 +
350 + cpuc = this_cpu_ptr(&cpu_hw_events);
351 +@@ -1654,6 +1668,10 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
352 + sparc_pmu_stop(event, 0);
353 + }
354 +
355 ++ finish_clock = sched_clock();
356 ++
357 ++ perf_sample_event_took(finish_clock - start_clock);
358 ++
359 + return NOTIFY_STOP;
360 + }
361 +
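
Two themes run through the sparc perf changes: events added without PERF_EF_START are now parked as PERF_HES_STOPPED | PERF_HES_ARCH so the PCR-programming paths can tell "never armed" apart from "temporarily stopped", and the NMI handler times itself so perf's throttling logic can react to slow handlers. The timing pattern, in sketch form (the real handler processes counters between the two reads):

u64 start_clock = sched_clock();

/* ... read the PCRs, update counters, emit samples ... */

u64 finish_clock = sched_clock();
perf_sample_event_took(finish_clock - start_clock);
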
362 +diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
363 +index 6c086086ca8f..59eaf6227af1 100644
364 +--- a/arch/sparc/kernel/process_64.c
365 ++++ b/arch/sparc/kernel/process_64.c
366 +@@ -36,6 +36,7 @@
367 + #include <linux/sysrq.h>
368 + #include <linux/nmi.h>
369 + #include <linux/context_tracking.h>
370 ++#include <linux/signal.h>
371 +
372 + #include <linux/uaccess.h>
373 + #include <asm/page.h>
374 +@@ -521,7 +522,12 @@ static void stack_unaligned(unsigned long sp)
375 + force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *) sp, 0, current);
376 + }
377 +
378 +-void fault_in_user_windows(void)
379 ++static const char uwfault32[] = KERN_INFO \
380 ++ "%s[%d]: bad register window fault: SP %08lx (orig_sp %08lx) TPC %08lx O7 %08lx\n";
381 ++static const char uwfault64[] = KERN_INFO \
382 ++ "%s[%d]: bad register window fault: SP %016lx (orig_sp %016lx) TPC %08lx O7 %016lx\n";
383 ++
384 ++void fault_in_user_windows(struct pt_regs *regs)
385 + {
386 + struct thread_info *t = current_thread_info();
387 + unsigned long window;
388 +@@ -534,9 +540,9 @@ void fault_in_user_windows(void)
389 + do {
390 + struct reg_window *rwin = &t->reg_window[window];
391 + int winsize = sizeof(struct reg_window);
392 +- unsigned long sp;
393 ++ unsigned long sp, orig_sp;
394 +
395 +- sp = t->rwbuf_stkptrs[window];
396 ++ orig_sp = sp = t->rwbuf_stkptrs[window];
397 +
398 + if (test_thread_64bit_stack(sp))
399 + sp += STACK_BIAS;
400 +@@ -547,8 +553,16 @@ void fault_in_user_windows(void)
401 + stack_unaligned(sp);
402 +
403 + if (unlikely(copy_to_user((char __user *)sp,
404 +- rwin, winsize)))
405 ++ rwin, winsize))) {
406 ++ if (show_unhandled_signals)
407 ++ printk_ratelimited(is_compat_task() ?
408 ++ uwfault32 : uwfault64,
409 ++ current->comm, current->pid,
410 ++ sp, orig_sp,
411 ++ regs->tpc,
412 ++ regs->u_regs[UREG_I7]);
413 + goto barf;
414 ++ }
415 + } while (window--);
416 + }
417 + set_thread_wsaved(0);
418 +@@ -556,8 +570,7 @@ void fault_in_user_windows(void)
419 +
420 + barf:
421 + set_thread_wsaved(window + 1);
422 +- user_exit();
423 +- do_exit(SIGILL);
424 ++ force_sig(SIGSEGV, current);
425 + }
426 +
427 + asmlinkage long sparc_do_fork(unsigned long clone_flags,
428 +diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
429 +index f6528884a2c8..29aa34f11720 100644
430 +--- a/arch/sparc/kernel/rtrap_64.S
431 ++++ b/arch/sparc/kernel/rtrap_64.S
432 +@@ -39,6 +39,7 @@ __handle_preemption:
433 + wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
434 +
435 + __handle_user_windows:
436 ++ add %sp, PTREGS_OFF, %o0
437 + call fault_in_user_windows
438 + 661: wrpr %g0, RTRAP_PSTATE, %pstate
439 + /* If userspace is using ADI, it could potentially pass
440 +@@ -84,8 +85,9 @@ __handle_signal:
441 + ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
442 + sethi %hi(0xf << 20), %l4
443 + and %l1, %l4, %l4
444 ++ andn %l1, %l4, %l1
445 + ba,pt %xcc, __handle_preemption_continue
446 +- andn %l1, %l4, %l1
447 ++ srl %l4, 20, %l4
448 +
449 + /* When returning from a NMI (%pil==15) interrupt we want to
450 + * avoid running softirqs, doing IRQ tracing, preempting, etc.
451 +diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
452 +index 44d379db3f64..4c5b3fcbed94 100644
453 +--- a/arch/sparc/kernel/signal32.c
454 ++++ b/arch/sparc/kernel/signal32.c
455 +@@ -371,7 +371,11 @@ static int setup_frame32(struct ksignal *ksig, struct pt_regs *regs,
456 + get_sigframe(ksig, regs, sigframe_size);
457 +
458 + if (invalid_frame_pointer(sf, sigframe_size)) {
459 +- do_exit(SIGILL);
460 ++ if (show_unhandled_signals)
461 ++ pr_info("%s[%d] bad frame in setup_frame32: %08lx TPC %08lx O7 %08lx\n",
462 ++ current->comm, current->pid, (unsigned long)sf,
463 ++ regs->tpc, regs->u_regs[UREG_I7]);
464 ++ force_sigsegv(ksig->sig, current);
465 + return -EINVAL;
466 + }
467 +
468 +@@ -501,7 +505,11 @@ static int setup_rt_frame32(struct ksignal *ksig, struct pt_regs *regs,
469 + get_sigframe(ksig, regs, sigframe_size);
470 +
471 + if (invalid_frame_pointer(sf, sigframe_size)) {
472 +- do_exit(SIGILL);
473 ++ if (show_unhandled_signals)
474 ++ pr_info("%s[%d] bad frame in setup_rt_frame32: %08lx TPC %08lx O7 %08lx\n",
475 ++ current->comm, current->pid, (unsigned long)sf,
476 ++ regs->tpc, regs->u_regs[UREG_I7]);
477 ++ force_sigsegv(ksig->sig, current);
478 + return -EINVAL;
479 + }
480 +
481 +diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
482 +index 48366e5eb5b2..e9de1803a22e 100644
483 +--- a/arch/sparc/kernel/signal_64.c
484 ++++ b/arch/sparc/kernel/signal_64.c
485 +@@ -370,7 +370,11 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
486 + get_sigframe(ksig, regs, sf_size);
487 +
488 + if (invalid_frame_pointer (sf)) {
489 +- do_exit(SIGILL); /* won't return, actually */
490 ++ if (show_unhandled_signals)
491 ++ pr_info("%s[%d] bad frame in setup_rt_frame: %016lx TPC %016lx O7 %016lx\n",
492 ++ current->comm, current->pid, (unsigned long)sf,
493 ++ regs->tpc, regs->u_regs[UREG_I7]);
494 ++ force_sigsegv(ksig->sig, current);
495 + return -EINVAL;
496 + }
497 +
498 +diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
499 +index 387ef993880a..25699462ad5b 100644
500 +--- a/arch/sparc/kernel/systbls_64.S
501 ++++ b/arch/sparc/kernel/systbls_64.S
502 +@@ -47,9 +47,9 @@ sys_call_table32:
503 + .word sys_recvfrom, sys_setreuid16, sys_setregid16, sys_rename, compat_sys_truncate
504 + /*130*/ .word compat_sys_ftruncate, sys_flock, compat_sys_lstat64, sys_sendto, sys_shutdown
505 + .word sys_socketpair, sys_mkdir, sys_rmdir, compat_sys_utimes, compat_sys_stat64
506 +-/*140*/ .word sys_sendfile64, sys_nis_syscall, compat_sys_futex, sys_gettid, compat_sys_getrlimit
507 ++/*140*/ .word sys_sendfile64, sys_getpeername, compat_sys_futex, sys_gettid, compat_sys_getrlimit
508 + .word compat_sys_setrlimit, sys_pivot_root, sys_prctl, sys_pciconfig_read, sys_pciconfig_write
509 +-/*150*/ .word sys_nis_syscall, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64
510 ++/*150*/ .word sys_getsockname, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64
511 + .word compat_sys_fcntl64, sys_inotify_rm_watch, compat_sys_statfs, compat_sys_fstatfs, sys_oldumount
512 + /*160*/ .word compat_sys_sched_setaffinity, compat_sys_sched_getaffinity, sys_getdomainname, sys_setdomainname, sys_nis_syscall
513 + .word sys_quotactl, sys_set_tid_address, compat_sys_mount, compat_sys_ustat, sys_setxattr
514 +diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
515 +index f396048a0d68..39822f611c01 100644
516 +--- a/arch/sparc/mm/init_64.c
517 ++++ b/arch/sparc/mm/init_64.c
518 +@@ -1383,6 +1383,7 @@ int __node_distance(int from, int to)
519 + }
520 + return numa_latency[from][to];
521 + }
522 ++EXPORT_SYMBOL(__node_distance);
523 +
524 + static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
525 + {
526 +diff --git a/arch/sparc/vdso/vclock_gettime.c b/arch/sparc/vdso/vclock_gettime.c
527 +index 3feb3d960ca5..75dca9aab737 100644
528 +--- a/arch/sparc/vdso/vclock_gettime.c
529 ++++ b/arch/sparc/vdso/vclock_gettime.c
530 +@@ -33,9 +33,19 @@
531 + #define TICK_PRIV_BIT (1ULL << 63)
532 + #endif
533 +
534 ++#ifdef CONFIG_SPARC64
535 + #define SYSCALL_STRING \
536 + "ta 0x6d;" \
537 +- "sub %%g0, %%o0, %%o0;" \
538 ++ "bcs,a 1f;" \
539 ++ " sub %%g0, %%o0, %%o0;" \
540 ++ "1:"
541 ++#else
542 ++#define SYSCALL_STRING \
543 ++ "ta 0x10;" \
544 ++ "bcs,a 1f;" \
545 ++ " sub %%g0, %%o0, %%o0;" \
546 ++ "1:"
547 ++#endif
548 +
549 + #define SYSCALL_CLOBBERS \
550 + "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", \
551 +diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
552 +index 981ba5e8241b..8671de126eac 100644
553 +--- a/arch/x86/events/amd/uncore.c
554 ++++ b/arch/x86/events/amd/uncore.c
555 +@@ -36,6 +36,7 @@
556 +
557 + static int num_counters_llc;
558 + static int num_counters_nb;
559 ++static bool l3_mask;
560 +
561 + static HLIST_HEAD(uncore_unused_list);
562 +
563 +@@ -209,6 +210,13 @@ static int amd_uncore_event_init(struct perf_event *event)
564 + hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
565 + hwc->idx = -1;
566 +
567 ++ /*
568 ++ * SliceMask and ThreadMask need to be set for certain L3 events in
569 ++ * Family 17h. For other events, the two fields do not affect the count.
570 ++ */
571 ++ if (l3_mask)
572 ++ hwc->config |= (AMD64_L3_SLICE_MASK | AMD64_L3_THREAD_MASK);
573 ++
574 + if (event->cpu < 0)
575 + return -EINVAL;
576 +
577 +@@ -525,6 +533,7 @@ static int __init amd_uncore_init(void)
578 + amd_llc_pmu.name = "amd_l3";
579 + format_attr_event_df.show = &event_show_df;
580 + format_attr_event_l3.show = &event_show_l3;
581 ++ l3_mask = true;
582 + } else {
583 + num_counters_nb = NUM_COUNTERS_NB;
584 + num_counters_llc = NUM_COUNTERS_L2;
585 +@@ -532,6 +541,7 @@ static int __init amd_uncore_init(void)
586 + amd_llc_pmu.name = "amd_l2";
587 + format_attr_event_df = format_attr_event;
588 + format_attr_event_l3 = format_attr_event;
589 ++ l3_mask = false;
590 + }
591 +
592 + amd_nb_pmu.attr_groups = amd_uncore_attr_groups_df;
593 +diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
594 +index 51d7c117e3c7..c07bee31abe8 100644
595 +--- a/arch/x86/events/intel/uncore_snbep.c
596 ++++ b/arch/x86/events/intel/uncore_snbep.c
597 +@@ -3061,7 +3061,7 @@ static struct event_constraint bdx_uncore_pcu_constraints[] = {
598 +
599 + void bdx_uncore_cpu_init(void)
600 + {
601 +- int pkg = topology_phys_to_logical_pkg(0);
602 ++ int pkg = topology_phys_to_logical_pkg(boot_cpu_data.phys_proc_id);
603 +
604 + if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
605 + bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
606 +@@ -3931,16 +3931,16 @@ static const struct pci_device_id skx_uncore_pci_ids[] = {
607 + .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
608 + },
609 + { /* M3UPI0 Link 0 */
610 +- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204C),
611 +- .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, SKX_PCI_UNCORE_M3UPI, 0),
612 ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
613 ++ .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
614 + },
615 + { /* M3UPI0 Link 1 */
616 +- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
617 +- .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 1),
618 ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
619 ++ .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
620 + },
621 + { /* M3UPI1 Link 2 */
622 +- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204C),
623 +- .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 4, SKX_PCI_UNCORE_M3UPI, 2),
624 ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
625 ++ .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
626 + },
627 + { /* end: all zeroes */ }
628 + };
629 +diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
630 +index 12f54082f4c8..78241b736f2a 100644
631 +--- a/arch/x86/include/asm/perf_event.h
632 ++++ b/arch/x86/include/asm/perf_event.h
633 +@@ -46,6 +46,14 @@
634 + #define INTEL_ARCH_EVENT_MASK \
635 + (ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)
636 +
637 ++#define AMD64_L3_SLICE_SHIFT 48
638 ++#define AMD64_L3_SLICE_MASK \
639 ++ ((0xFULL) << AMD64_L3_SLICE_SHIFT)
640 ++
641 ++#define AMD64_L3_THREAD_SHIFT 56
642 ++#define AMD64_L3_THREAD_MASK \
643 ++ ((0xFFULL) << AMD64_L3_THREAD_SHIFT)
644 ++
645 + #define X86_RAW_EVENT_MASK \
646 + (ARCH_PERFMON_EVENTSEL_EVENT | \
647 + ARCH_PERFMON_EVENTSEL_UMASK | \
648 +diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
649 +index 930c88341e4e..1fbf38dde84c 100644
650 +--- a/arch/x86/kernel/paravirt.c
651 ++++ b/arch/x86/kernel/paravirt.c
652 +@@ -90,7 +90,7 @@ unsigned paravirt_patch_call(void *insnbuf,
653 +
654 + if (len < 5) {
655 + #ifdef CONFIG_RETPOLINE
656 +- WARN_ONCE("Failing to patch indirect CALL in %ps\n", (void *)addr);
657 ++ WARN_ONCE(1, "Failing to patch indirect CALL in %ps\n", (void *)addr);
658 + #endif
659 + return len; /* call too long for patch site */
660 + }
661 +@@ -110,7 +110,7 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
662 +
663 + if (len < 5) {
664 + #ifdef CONFIG_RETPOLINE
665 +- WARN_ONCE("Failing to patch indirect JMP in %ps\n", (void *)addr);
666 ++ WARN_ONCE(1, "Failing to patch indirect JMP in %ps\n", (void *)addr);
667 + #endif
668 + return len; /* call too long for patch site */
669 + }
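
The paravirt fix is an argument-order bug: WARN_ONCE() takes a condition as its first parameter, and the old calls passed the format string there, so it was evaluated as an always-true condition while the real arguments shifted out of place. A runnable model with a stand-in macro:

#include <stdio.h>

/* Simplified stand-in for WARN_ONCE(): condition first, then the format. */
#define MY_WARN_ONCE(cond, fmt, ...) \
        do { if (cond) fprintf(stderr, fmt, ##__VA_ARGS__); } while (0)

int main(void)
{
        int len = 4;
        void *addr = (void *)0x1234;

        if (len < 5)    /* corrected form: explicit condition, then message */
                MY_WARN_ONCE(1, "Failing to patch indirect CALL in %p\n", addr);
        return 0;
}
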
670 +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
671 +index ef772e5634d4..3e59a187fe30 100644
672 +--- a/arch/x86/kvm/svm.c
673 ++++ b/arch/x86/kvm/svm.c
674 +@@ -436,14 +436,18 @@ static inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
675 +
676 + static inline bool svm_sev_enabled(void)
677 + {
678 +- return max_sev_asid;
679 ++ return IS_ENABLED(CONFIG_KVM_AMD_SEV) ? max_sev_asid : 0;
680 + }
681 +
682 + static inline bool sev_guest(struct kvm *kvm)
683 + {
684 ++#ifdef CONFIG_KVM_AMD_SEV
685 + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
686 +
687 + return sev->active;
688 ++#else
689 ++ return false;
690 ++#endif
691 + }
692 +
693 + static inline int sev_get_asid(struct kvm *kvm)
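
The SEV guards above use two complementary idioms. IS_ENABLED(CONFIG_FOO) collapses to a constant 1 when the option is =y or =m and 0 otherwise, so it can sit inside an ordinary C expression; #ifdef removes the guarded code entirely, so nothing in that branch even has to compile when the option is off. A condensed sketch of the distinction, reusing the names from the diff:

/* Expression form: both branches must parse, which is fine here. */
static inline bool svm_sev_enabled(void)
{
        return IS_ENABLED(CONFIG_KVM_AMD_SEV) ? max_sev_asid : 0;
}

/* Preprocessor form: the guarded body vanishes when the option is off. */
static inline bool sev_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
        return to_kvm_svm(kvm)->sev_info.active;
#else
        return false;
#endif
}
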
694 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
695 +index 32721ef9652d..9efe130ea2e6 100644
696 +--- a/arch/x86/kvm/vmx.c
697 ++++ b/arch/x86/kvm/vmx.c
698 +@@ -819,6 +819,7 @@ struct nested_vmx {
699 +
700 + /* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
701 + u64 vmcs01_debugctl;
702 ++ u64 vmcs01_guest_bndcfgs;
703 +
704 + u16 vpid02;
705 + u16 last_vpid;
706 +@@ -3395,9 +3396,6 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
707 + VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
708 + VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
709 +
710 +- if (kvm_mpx_supported())
711 +- msrs->exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
712 +-
713 + /* We support free control of debug control saving. */
714 + msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
715 +
716 +@@ -3414,8 +3412,6 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
717 + VM_ENTRY_LOAD_IA32_PAT;
718 + msrs->entry_ctls_high |=
719 + (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
720 +- if (kvm_mpx_supported())
721 +- msrs->entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
722 +
723 + /* We support free control of debug control loading. */
724 + msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
725 +@@ -10825,6 +10821,23 @@ static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu)
726 + #undef cr4_fixed1_update
727 + }
728 +
729 ++static void nested_vmx_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
730 ++{
731 ++ struct vcpu_vmx *vmx = to_vmx(vcpu);
732 ++
733 ++ if (kvm_mpx_supported()) {
734 ++ bool mpx_enabled = guest_cpuid_has(vcpu, X86_FEATURE_MPX);
735 ++
736 ++ if (mpx_enabled) {
737 ++ vmx->nested.msrs.entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
738 ++ vmx->nested.msrs.exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
739 ++ } else {
740 ++ vmx->nested.msrs.entry_ctls_high &= ~VM_ENTRY_LOAD_BNDCFGS;
741 ++ vmx->nested.msrs.exit_ctls_high &= ~VM_EXIT_CLEAR_BNDCFGS;
742 ++ }
743 ++ }
744 ++}
745 ++
746 + static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
747 + {
748 + struct vcpu_vmx *vmx = to_vmx(vcpu);
749 +@@ -10841,8 +10854,10 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
750 + to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
751 + ~FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
752 +
753 +- if (nested_vmx_allowed(vcpu))
754 ++ if (nested_vmx_allowed(vcpu)) {
755 + nested_vmx_cr_fixed1_bits_update(vcpu);
756 ++ nested_vmx_entry_exit_ctls_update(vcpu);
757 ++ }
758 + }
759 +
760 + static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
761 +@@ -11553,8 +11568,13 @@ static void prepare_vmcs02_full(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
762 +
763 + set_cr4_guest_host_mask(vmx);
764 +
765 +- if (vmx_mpx_supported())
766 +- vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
767 ++ if (kvm_mpx_supported()) {
768 ++ if (vmx->nested.nested_run_pending &&
769 ++ (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
770 ++ vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
771 ++ else
772 ++ vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
773 ++ }
774 +
775 + if (enable_vpid) {
776 + if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
777 +@@ -12068,6 +12088,9 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu)
778 +
779 + if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
780 + vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
781 ++ if (kvm_mpx_supported() &&
782 ++ !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
783 ++ vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
784 +
785 + vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
786 + vmx_segment_cache_clear(vmx);
787 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
788 +index 97fcac34e007..3cd58a5eb449 100644
789 +--- a/arch/x86/kvm/x86.c
790 ++++ b/arch/x86/kvm/x86.c
791 +@@ -4625,7 +4625,7 @@ static void kvm_init_msr_list(void)
792 + */
793 + switch (msrs_to_save[i]) {
794 + case MSR_IA32_BNDCFGS:
795 +- if (!kvm_x86_ops->mpx_supported())
796 ++ if (!kvm_mpx_supported())
797 + continue;
798 + break;
799 + case MSR_TSC_AUX:
800 +diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c
801 +index 6f7637b19738..e764dfdea53f 100644
802 +--- a/drivers/clk/mvebu/armada-37xx-periph.c
803 ++++ b/drivers/clk/mvebu/armada-37xx-periph.c
804 +@@ -419,7 +419,6 @@ static unsigned int armada_3700_pm_dvfs_get_cpu_parent(struct regmap *base)
805 + static u8 clk_pm_cpu_get_parent(struct clk_hw *hw)
806 + {
807 + struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
808 +- int num_parents = clk_hw_get_num_parents(hw);
809 + u32 val;
810 +
811 + if (armada_3700_pm_dvfs_is_enabled(pm_cpu->nb_pm_base)) {
812 +diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
813 +index 06dce16e22bb..70f0dedca59f 100644
814 +--- a/drivers/gpio/gpiolib.c
815 ++++ b/drivers/gpio/gpiolib.c
816 +@@ -1675,7 +1675,8 @@ static void gpiochip_set_cascaded_irqchip(struct gpio_chip *gpiochip,
817 + irq_set_chained_handler_and_data(parent_irq, parent_handler,
818 + gpiochip);
819 +
820 +- gpiochip->irq.parents = &parent_irq;
821 ++ gpiochip->irq.parent_irq = parent_irq;
822 ++ gpiochip->irq.parents = &gpiochip->irq.parent_irq;
823 + gpiochip->irq.num_parents = 1;
824 + }
825 +
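
The gpiolib change fixes a lifetime bug: the irqchip kept a pointer to parent_irq, a variable on the setup function's stack, which dangles the moment setup returns. The fix copies the value into the long-lived structure and points at that copy instead. A userspace model of the bug class (names are stand-ins):

struct irq_info {
        unsigned int *parents;          /* consumers read through this */
        unsigned int parent_irq;        /* storage added by the fix    */
};

void setup_buggy(struct irq_info *info, unsigned int parent_irq)
{
        info->parents = &parent_irq;       /* stack address: dangles on return */
}

void setup_fixed(struct irq_info *info, unsigned int parent_irq)
{
        info->parent_irq = parent_irq;     /* copy into long-lived storage */
        info->parents = &info->parent_irq; /* point at that copy           */
}

int main(void)
{
        struct irq_info info;

        setup_fixed(&info, 42);
        return *info.parents == 42 ? 0 : 1;
}
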
826 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
827 +index e484d0a94bdc..5b9cc3aeaa55 100644
828 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
829 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
830 +@@ -4494,12 +4494,18 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
831 + }
832 + spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
833 +
834 +- /* Signal HW programming completion */
835 +- drm_atomic_helper_commit_hw_done(state);
836 +
837 + if (wait_for_vblank)
838 + drm_atomic_helper_wait_for_flip_done(dev, state);
839 +
840 ++ /*
841 ++ * FIXME:
842 ++ * Delay hw_done() until flip_done() is signaled. This is to block
843 ++ * another commit from freeing the CRTC state while we're still
844 ++ * waiting on flip_done.
845 ++ */
846 ++ drm_atomic_helper_commit_hw_done(state);
847 ++
848 + drm_atomic_helper_cleanup_planes(dev, state);
849 +
850 + /* Finally, drop a runtime PM reference for each newly disabled CRTC,
851 +diff --git a/drivers/gpu/drm/i2c/tda9950.c b/drivers/gpu/drm/i2c/tda9950.c
852 +index 3f7396caad48..ccd355d0c123 100644
853 +--- a/drivers/gpu/drm/i2c/tda9950.c
854 ++++ b/drivers/gpu/drm/i2c/tda9950.c
855 +@@ -188,7 +188,8 @@ static irqreturn_t tda9950_irq(int irq, void *data)
856 + break;
857 + }
858 + /* TDA9950 executes all retries for us */
859 +- tx_status |= CEC_TX_STATUS_MAX_RETRIES;
860 ++ if (tx_status != CEC_TX_STATUS_OK)
861 ++ tx_status |= CEC_TX_STATUS_MAX_RETRIES;
862 + cec_transmit_done(priv->adap, tx_status, arb_lost_cnt,
863 + nack_cnt, 0, err_cnt);
864 + break;
865 +@@ -307,7 +308,7 @@ static void tda9950_release(struct tda9950_priv *priv)
866 + /* Wait up to .5s for it to signal non-busy */
867 + do {
868 + csr = tda9950_read(client, REG_CSR);
869 +- if (!(csr & CSR_BUSY) || --timeout)
870 ++ if (!(csr & CSR_BUSY) || !--timeout)
871 + break;
872 + msleep(10);
873 + } while (1);
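
The tda9950_release() change fixes an inverted timeout test. With "|| --timeout", the decremented counter is nonzero (hence true) on the very first pass, so the busy-wait gave up after one poll instead of the intended half second; only "!--timeout" exits when the budget is actually exhausted. A runnable demonstration:

#include <assert.h>

static int polls_before_exit(int timeout, int fixed)
{
        int busy = 1;                   /* model a device that never goes idle */
        int polls = 0;

        do {
                polls++;
                if (!busy || (fixed ? !--timeout : --timeout))
                        break;
        } while (1);
        return polls;
}

int main(void)
{
        assert(polls_before_exit(50, 0) == 1);  /* buggy: bails immediately    */
        assert(polls_before_exit(50, 1) == 50); /* fixed: uses the full budget */
        return 0;
}
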
874 +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
875 +index eee6b79fb131..ae5b72269e27 100644
876 +--- a/drivers/hid/hid-ids.h
877 ++++ b/drivers/hid/hid-ids.h
878 +@@ -974,7 +974,6 @@
879 + #define USB_DEVICE_ID_SIS817_TOUCH 0x0817
880 + #define USB_DEVICE_ID_SIS_TS 0x1013
881 + #define USB_DEVICE_ID_SIS1030_TOUCH 0x1030
882 +-#define USB_DEVICE_ID_SIS10FB_TOUCH 0x10fb
883 +
884 + #define USB_VENDOR_ID_SKYCABLE 0x1223
885 + #define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07
886 +diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
887 +index 37013b58098c..d17cf6e323b2 100644
888 +--- a/drivers/hid/i2c-hid/i2c-hid.c
889 ++++ b/drivers/hid/i2c-hid/i2c-hid.c
890 +@@ -47,8 +47,7 @@
891 + /* quirks to control the device */
892 + #define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0)
893 + #define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1)
894 +-#define I2C_HID_QUIRK_RESEND_REPORT_DESCR BIT(2)
895 +-#define I2C_HID_QUIRK_NO_RUNTIME_PM BIT(3)
896 ++#define I2C_HID_QUIRK_NO_RUNTIME_PM BIT(2)
897 +
898 + /* flags */
899 + #define I2C_HID_STARTED 0
900 +@@ -172,8 +171,6 @@ static const struct i2c_hid_quirks {
901 + { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
902 + I2C_HID_QUIRK_NO_IRQ_AFTER_RESET |
903 + I2C_HID_QUIRK_NO_RUNTIME_PM },
904 +- { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS10FB_TOUCH,
905 +- I2C_HID_QUIRK_RESEND_REPORT_DESCR },
906 + { 0, 0 }
907 + };
908 +
909 +@@ -1241,22 +1238,13 @@ static int i2c_hid_resume(struct device *dev)
910 +
911 + /* Instead of resetting device, simply powers the device on. This
912 + * solves "incomplete reports" on Raydium devices 2386:3118 and
913 +- * 2386:4B33
914 ++ * 2386:4B33 and fixes various SIS touchscreens no longer sending
915 ++ * data after a suspend/resume.
916 + */
917 + ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
918 + if (ret)
919 + return ret;
920 +
921 +- /* Some devices need to re-send report descr cmd
922 +- * after resume, after this it will be back normal.
923 +- * otherwise it issues too many incomplete reports.
924 +- */
925 +- if (ihid->quirks & I2C_HID_QUIRK_RESEND_REPORT_DESCR) {
926 +- ret = i2c_hid_command(client, &hid_report_descr_cmd, NULL, 0);
927 +- if (ret)
928 +- return ret;
929 +- }
930 +-
931 + if (hid->driver && hid->driver->reset_resume) {
932 + ret = hid->driver->reset_resume(hid);
933 + return ret;
934 +diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
935 +index 308456d28afb..73339fd47dd8 100644
936 +--- a/drivers/infiniband/hw/mlx5/mr.c
937 ++++ b/drivers/infiniband/hw/mlx5/mr.c
938 +@@ -544,6 +544,9 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
939 + int shrink = 0;
940 + int c;
941 +
942 ++ if (!mr->allocated_from_cache)
943 ++ return;
944 ++
945 + c = order2idx(dev, mr->order);
946 + if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
947 + mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
948 +@@ -1647,18 +1650,19 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
949 + umem = NULL;
950 + }
951 + #endif
952 +-
953 + clean_mr(dev, mr);
954 +
955 ++ /*
956 ++ * We should unregister the DMA address from the HCA before
957 ++	 * removing the DMA mapping.
958 ++ */
959 ++ mlx5_mr_cache_free(dev, mr);
960 + if (umem) {
961 + ib_umem_release(umem);
962 + atomic_sub(npages, &dev->mdev->priv.reg_pages);
963 + }
964 +-
965 + if (!mr->allocated_from_cache)
966 + kfree(mr);
967 +- else
968 +- mlx5_mr_cache_free(dev, mr);
969 + }
970 +
971 + int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
972 +diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
973 +index 9697977b80f0..6b9ad8673218 100644
974 +--- a/drivers/net/bonding/bond_netlink.c
975 ++++ b/drivers/net/bonding/bond_netlink.c
976 +@@ -638,8 +638,7 @@ static int bond_fill_info(struct sk_buff *skb,
977 + goto nla_put_failure;
978 +
979 + if (nla_put(skb, IFLA_BOND_AD_ACTOR_SYSTEM,
980 +- sizeof(bond->params.ad_actor_system),
981 +- &bond->params.ad_actor_system))
982 ++ ETH_ALEN, &bond->params.ad_actor_system))
983 + goto nla_put_failure;
984 + }
985 + if (!bond_3ad_get_active_agg_info(bond, &info)) {
986 +diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
987 +index 1b01cd2820ba..000f0d42a710 100644
988 +--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
989 ++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
990 +@@ -1580,8 +1580,6 @@ static int ena_up_complete(struct ena_adapter *adapter)
991 + if (rc)
992 + return rc;
993 +
994 +- ena_init_napi(adapter);
995 +-
996 + ena_change_mtu(adapter->netdev, adapter->netdev->mtu);
997 +
998 + ena_refill_all_rx_bufs(adapter);
999 +@@ -1735,6 +1733,13 @@ static int ena_up(struct ena_adapter *adapter)
1000 +
1001 + ena_setup_io_intr(adapter);
1002 +
1003 ++ /* napi poll functions should be initialized before running
1004 ++ * request_irq(), to handle a rare condition where there is a pending
1005 ++ * interrupt, causing the ISR to fire immediately while the poll
1006 ++ * function wasn't set yet, causing a null dereference
1007 ++ */
1008 ++ ena_init_napi(adapter);
1009 ++
1010 + rc = ena_request_io_irq(adapter);
1011 + if (rc)
1012 + goto err_req_irq;
1013 +@@ -2648,7 +2653,11 @@ err_disable_msix:
1014 + ena_free_mgmnt_irq(adapter);
1015 + ena_disable_msix(adapter);
1016 + err_device_destroy:
1017 ++ ena_com_abort_admin_commands(ena_dev);
1018 ++ ena_com_wait_for_abort_completion(ena_dev);
1019 + ena_com_admin_destroy(ena_dev);
1020 ++ ena_com_mmio_reg_read_request_destroy(ena_dev);
1021 ++ ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
1022 + err:
1023 + clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
1024 + clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
1025 +@@ -3128,15 +3137,8 @@ err_rss_init:
1026 +
1027 + static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
1028 + {
1029 +- int release_bars;
1030 +-
1031 +- if (ena_dev->mem_bar)
1032 +- devm_iounmap(&pdev->dev, ena_dev->mem_bar);
1033 +-
1034 +- if (ena_dev->reg_bar)
1035 +- devm_iounmap(&pdev->dev, ena_dev->reg_bar);
1036 ++ int release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
1037 +
1038 +- release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
1039 + pci_release_selected_regions(pdev, release_bars);
1040 + }
1041 +
1042 +diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
1043 +index 116997a8b593..00332a1ea84b 100644
1044 +--- a/drivers/net/ethernet/amd/declance.c
1045 ++++ b/drivers/net/ethernet/amd/declance.c
1046 +@@ -1031,6 +1031,7 @@ static int dec_lance_probe(struct device *bdev, const int type)
1047 + int i, ret;
1048 + unsigned long esar_base;
1049 + unsigned char *esar;
1050 ++ const char *desc;
1051 +
1052 + if (dec_lance_debug && version_printed++ == 0)
1053 + printk(version);
1054 +@@ -1216,19 +1217,20 @@ static int dec_lance_probe(struct device *bdev, const int type)
1055 + */
1056 + switch (type) {
1057 + case ASIC_LANCE:
1058 +- printk("%s: IOASIC onboard LANCE", name);
1059 ++ desc = "IOASIC onboard LANCE";
1060 + break;
1061 + case PMAD_LANCE:
1062 +- printk("%s: PMAD-AA", name);
1063 ++ desc = "PMAD-AA";
1064 + break;
1065 + case PMAX_LANCE:
1066 +- printk("%s: PMAX onboard LANCE", name);
1067 ++ desc = "PMAX onboard LANCE";
1068 + break;
1069 + }
1070 + for (i = 0; i < 6; i++)
1071 + dev->dev_addr[i] = esar[i * 4];
1072 +
1073 +- printk(", addr = %pM, irq = %d\n", dev->dev_addr, dev->irq);
1074 ++ printk("%s: %s, addr = %pM, irq = %d\n",
1075 ++ name, desc, dev->dev_addr, dev->irq);
1076 +
1077 + dev->netdev_ops = &lance_netdev_ops;
1078 + dev->watchdog_timeo = 5*HZ;
1079 +diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
1080 +index 4241ae928d4a..34af5f1569c8 100644
1081 +--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
1082 ++++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
1083 +@@ -321,9 +321,12 @@ int bcmgenet_mii_probe(struct net_device *dev)
1084 + phydev->advertising = phydev->supported;
1085 +
1086 + /* The internal PHY has its link interrupts routed to the
1087 +- * Ethernet MAC ISRs
1088 ++ * Ethernet MAC ISRs. On GENETv5 there is a hardware issue
1089 ++ * that prevents the signaling of link UP interrupts when
1090 ++ * the link operates at 10Mbps, so fallback to polling for
1091 ++ * those versions of GENET.
1092 + */
1093 +- if (priv->internal_phy)
1094 ++ if (priv->internal_phy && !GENET_IS_V5(priv))
1095 + dev->phydev->irq = PHY_IGNORE_INTERRUPT;
1096 +
1097 + return 0;
1098 +diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
1099 +index dfa045f22ef1..db568232ff3e 100644
1100 +--- a/drivers/net/ethernet/cadence/macb_main.c
1101 ++++ b/drivers/net/ethernet/cadence/macb_main.c
1102 +@@ -2089,6 +2089,7 @@ static void macb_configure_dma(struct macb *bp)
1103 + else
1104 + dmacfg &= ~GEM_BIT(TXCOEN);
1105 +
1106 ++ dmacfg &= ~GEM_BIT(ADDR64);
1107 + #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1108 + if (bp->hw_dma_cap & HW_DMA_CAP_64B)
1109 + dmacfg |= GEM_BIT(ADDR64);
1110 +diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1111 +index a19172dbe6be..c34ea385fe4a 100644
1112 +--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1113 ++++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1114 +@@ -2159,6 +2159,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1115 + return -EPERM;
1116 + if (copy_from_user(&t, useraddr, sizeof(t)))
1117 + return -EFAULT;
1118 ++ if (t.cmd != CHELSIO_SET_QSET_PARAMS)
1119 ++ return -EINVAL;
1120 + if (t.qset_idx >= SGE_QSETS)
1121 + return -EINVAL;
1122 + if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1123 +@@ -2258,6 +2260,9 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1124 + if (copy_from_user(&t, useraddr, sizeof(t)))
1125 + return -EFAULT;
1126 +
1127 ++ if (t.cmd != CHELSIO_GET_QSET_PARAMS)
1128 ++ return -EINVAL;
1129 ++
1130 + /* Display qsets for all ports when offload enabled */
1131 + if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
1132 + q1 = 0;
1133 +@@ -2303,6 +2308,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1134 + return -EBUSY;
1135 + if (copy_from_user(&edata, useraddr, sizeof(edata)))
1136 + return -EFAULT;
1137 ++ if (edata.cmd != CHELSIO_SET_QSET_NUM)
1138 ++ return -EINVAL;
1139 + if (edata.val < 1 ||
1140 + (edata.val > 1 && !(adapter->flags & USING_MSIX)))
1141 + return -EINVAL;
1142 +@@ -2343,6 +2350,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1143 + return -EPERM;
1144 + if (copy_from_user(&t, useraddr, sizeof(t)))
1145 + return -EFAULT;
1146 ++ if (t.cmd != CHELSIO_LOAD_FW)
1147 ++ return -EINVAL;
1148 + /* Check t.len sanity ? */
1149 + fw_data = memdup_user(useraddr + sizeof(t), t.len);
1150 + if (IS_ERR(fw_data))
1151 +@@ -2366,6 +2375,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1152 + return -EBUSY;
1153 + if (copy_from_user(&m, useraddr, sizeof(m)))
1154 + return -EFAULT;
1155 ++ if (m.cmd != CHELSIO_SETMTUTAB)
1156 ++ return -EINVAL;
1157 + if (m.nmtus != NMTUS)
1158 + return -EINVAL;
1159 + if (m.mtus[0] < 81) /* accommodate SACK */
1160 +@@ -2407,6 +2418,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1161 + return -EBUSY;
1162 + if (copy_from_user(&m, useraddr, sizeof(m)))
1163 + return -EFAULT;
1164 ++ if (m.cmd != CHELSIO_SET_PM)
1165 ++ return -EINVAL;
1166 + if (!is_power_of_2(m.rx_pg_sz) ||
1167 + !is_power_of_2(m.tx_pg_sz))
1168 + return -EINVAL; /* not power of 2 */
1169 +@@ -2440,6 +2453,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1170 + return -EIO; /* need the memory controllers */
1171 + if (copy_from_user(&t, useraddr, sizeof(t)))
1172 + return -EFAULT;
1173 ++ if (t.cmd != CHELSIO_GET_MEM)
1174 ++ return -EINVAL;
1175 + if ((t.addr & 7) || (t.len & 7))
1176 + return -EINVAL;
1177 + if (t.mem_id == MEM_CM)
1178 +@@ -2492,6 +2507,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1179 + return -EAGAIN;
1180 + if (copy_from_user(&t, useraddr, sizeof(t)))
1181 + return -EFAULT;
1182 ++ if (t.cmd != CHELSIO_SET_TRACE_FILTER)
1183 ++ return -EINVAL;
1184 +
1185 + tp = (const struct trace_params *)&t.sip;
1186 + if (t.config_tx)
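
Each cxgb3 ioctl sub-command now re-checks the cmd field embedded in the structure it copied from userspace. The sub-command is fetched from user memory twice, once to dispatch and again as the first member of the full struct, so a concurrent userspace thread could change it between the two reads; re-validating after copy_from_user() closes that double-fetch window. The generic shape of the hardening, as a sketch:

if (copy_from_user(&t, useraddr, sizeof(t)))
        return -EFAULT;
if (t.cmd != CHELSIO_SET_QSET_PARAMS)   /* must still match this handler */
        return -EINVAL;
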
1187 +diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
1188 +index 8f755009ff38..c8445a4135a9 100644
1189 +--- a/drivers/net/ethernet/emulex/benet/be_main.c
1190 ++++ b/drivers/net/ethernet/emulex/benet/be_main.c
1191 +@@ -3915,8 +3915,6 @@ static int be_enable_vxlan_offloads(struct be_adapter *adapter)
1192 + netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1193 + NETIF_F_TSO | NETIF_F_TSO6 |
1194 + NETIF_F_GSO_UDP_TUNNEL;
1195 +- netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
1196 +- netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
1197 +
1198 + dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
1199 + be16_to_cpu(port));
1200 +@@ -3938,8 +3936,6 @@ static void be_disable_vxlan_offloads(struct be_adapter *adapter)
1201 + adapter->vxlan_port = 0;
1202 +
1203 + netdev->hw_enc_features = 0;
1204 +- netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
1205 +- netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
1206 + }
1207 +
1208 + static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
1209 +@@ -5232,6 +5228,7 @@ static void be_netdev_init(struct net_device *netdev)
1210 + struct be_adapter *adapter = netdev_priv(netdev);
1211 +
1212 + netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
1213 ++ NETIF_F_GSO_UDP_TUNNEL |
1214 + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
1215 + NETIF_F_HW_VLAN_CTAG_TX;
1216 + if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
1217 +diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
1218 +index 4778b663653e..bf80855dd0dd 100644
1219 +--- a/drivers/net/ethernet/freescale/fec.h
1220 ++++ b/drivers/net/ethernet/freescale/fec.h
1221 +@@ -452,6 +452,10 @@ struct bufdesc_ex {
1222 + * initialisation.
1223 + */
1224 + #define FEC_QUIRK_MIB_CLEAR (1 << 15)
1225 ++/* Only i.MX25/i.MX27/i.MX28 controller supports FRBR,FRSR registers,
1226 ++ * those FIFO receive registers are resolved in other platforms.
1227 ++ * those FIFO receive registers are reserved in other platforms.
1228 ++#define FEC_QUIRK_HAS_FRREG (1 << 16)
1229 +
1230 + struct bufdesc_prop {
1231 + int qid;
1232 +diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
1233 +index c729665107f5..11f90bb2d2a9 100644
1234 +--- a/drivers/net/ethernet/freescale/fec_main.c
1235 ++++ b/drivers/net/ethernet/freescale/fec_main.c
1236 +@@ -90,14 +90,16 @@ static struct platform_device_id fec_devtype[] = {
1237 + .driver_data = 0,
1238 + }, {
1239 + .name = "imx25-fec",
1240 +- .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR,
1241 ++ .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
1242 ++ FEC_QUIRK_HAS_FRREG,
1243 + }, {
1244 + .name = "imx27-fec",
1245 +- .driver_data = FEC_QUIRK_MIB_CLEAR,
1246 ++ .driver_data = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG,
1247 + }, {
1248 + .name = "imx28-fec",
1249 + .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
1250 +- FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC,
1251 ++ FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
1252 ++ FEC_QUIRK_HAS_FRREG,
1253 + }, {
1254 + .name = "imx6q-fec",
1255 + .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
1256 +@@ -1157,7 +1159,7 @@ static void fec_enet_timeout_work(struct work_struct *work)
1257 + napi_disable(&fep->napi);
1258 + netif_tx_lock_bh(ndev);
1259 + fec_restart(ndev);
1260 +- netif_wake_queue(ndev);
1261 ++ netif_tx_wake_all_queues(ndev);
1262 + netif_tx_unlock_bh(ndev);
1263 + napi_enable(&fep->napi);
1264 + }
1265 +@@ -1272,7 +1274,7 @@ skb_done:
1266 +
1267 + /* Since we have freed up a buffer, the ring is no longer full
1268 + */
1269 +- if (netif_queue_stopped(ndev)) {
1270 ++ if (netif_tx_queue_stopped(nq)) {
1271 + entries_free = fec_enet_get_free_txdesc_num(txq);
1272 + if (entries_free >= txq->tx_wake_threshold)
1273 + netif_tx_wake_queue(nq);
1274 +@@ -1745,7 +1747,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
1275 + napi_disable(&fep->napi);
1276 + netif_tx_lock_bh(ndev);
1277 + fec_restart(ndev);
1278 +- netif_wake_queue(ndev);
1279 ++ netif_tx_wake_all_queues(ndev);
1280 + netif_tx_unlock_bh(ndev);
1281 + napi_enable(&fep->napi);
1282 + }
1283 +@@ -2163,7 +2165,13 @@ static void fec_enet_get_regs(struct net_device *ndev,
1284 + memset(buf, 0, regs->len);
1285 +
1286 + for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) {
1287 +- off = fec_enet_register_offset[i] / 4;
1288 ++ off = fec_enet_register_offset[i];
1289 ++
1290 ++ if ((off == FEC_R_BOUND || off == FEC_R_FSTART) &&
1291 ++ !(fep->quirks & FEC_QUIRK_HAS_FRREG))
1292 ++ continue;
1293 ++
1294 ++ off >>= 2;
1295 + buf[off] = readl(&theregs[off]);
1296 + }
1297 + }
1298 +@@ -2246,7 +2254,7 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
1299 + napi_disable(&fep->napi);
1300 + netif_tx_lock_bh(ndev);
1301 + fec_restart(ndev);
1302 +- netif_wake_queue(ndev);
1303 ++ netif_tx_wake_all_queues(ndev);
1304 + netif_tx_unlock_bh(ndev);
1305 + napi_enable(&fep->napi);
1306 + }
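
The repeated netif_wake_queue() to netif_tx_wake_all_queues() substitutions in fec_main.c matter on multiqueue FECs: netif_wake_queue() restarts only TX queue 0, so after a full MAC restart the remaining rings stayed stopped. Sketched in context:

/* After fec_restart() reinitializes every ring, every queue needs a
 * kick; netif_wake_queue() would only restart queue 0. */
netif_tx_lock_bh(ndev);
fec_restart(ndev);
netif_tx_wake_all_queues(ndev);
netif_tx_unlock_bh(ndev);
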
1307 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1308 +index d3a1dd20e41d..fb6c72cf70a0 100644
1309 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1310 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1311 +@@ -429,10 +429,9 @@ static inline u16 mlx5e_icosq_wrap_cnt(struct mlx5e_icosq *sq)
1312 +
1313 + static inline void mlx5e_fill_icosq_frag_edge(struct mlx5e_icosq *sq,
1314 + struct mlx5_wq_cyc *wq,
1315 +- u16 pi, u16 frag_pi)
1316 ++ u16 pi, u16 nnops)
1317 + {
1318 + struct mlx5e_sq_wqe_info *edge_wi, *wi = &sq->db.ico_wqe[pi];
1319 +- u8 nnops = mlx5_wq_cyc_get_frag_size(wq) - frag_pi;
1320 +
1321 + edge_wi = wi + nnops;
1322 +
1323 +@@ -451,15 +450,14 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
1324 + struct mlx5_wq_cyc *wq = &sq->wq;
1325 + struct mlx5e_umr_wqe *umr_wqe;
1326 + u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1);
1327 +- u16 pi, frag_pi;
1328 ++ u16 pi, contig_wqebbs_room;
1329 + int err;
1330 + int i;
1331 +
1332 + pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
1333 +- frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc);
1334 +-
1335 +- if (unlikely(frag_pi + MLX5E_UMR_WQEBBS > mlx5_wq_cyc_get_frag_size(wq))) {
1336 +- mlx5e_fill_icosq_frag_edge(sq, wq, pi, frag_pi);
1337 ++ contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
1338 ++ if (unlikely(contig_wqebbs_room < MLX5E_UMR_WQEBBS)) {
1339 ++ mlx5e_fill_icosq_frag_edge(sq, wq, pi, contig_wqebbs_room);
1340 + pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
1341 + }
1342 +
1343 +@@ -693,43 +691,15 @@ static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth)
1344 + return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6));
1345 + }
1346 +
1347 +-static __be32 mlx5e_get_fcs(struct sk_buff *skb)
1348 ++static u32 mlx5e_get_fcs(const struct sk_buff *skb)
1349 + {
1350 +- int last_frag_sz, bytes_in_prev, nr_frags;
1351 +- u8 *fcs_p1, *fcs_p2;
1352 +- skb_frag_t *last_frag;
1353 +- __be32 fcs_bytes;
1354 +-
1355 +- if (!skb_is_nonlinear(skb))
1356 +- return *(__be32 *)(skb->data + skb->len - ETH_FCS_LEN);
1357 +-
1358 +- nr_frags = skb_shinfo(skb)->nr_frags;
1359 +- last_frag = &skb_shinfo(skb)->frags[nr_frags - 1];
1360 +- last_frag_sz = skb_frag_size(last_frag);
1361 +-
1362 +- /* If all FCS data is in last frag */
1363 +- if (last_frag_sz >= ETH_FCS_LEN)
1364 +- return *(__be32 *)(skb_frag_address(last_frag) +
1365 +- last_frag_sz - ETH_FCS_LEN);
1366 +-
1367 +- fcs_p2 = (u8 *)skb_frag_address(last_frag);
1368 +- bytes_in_prev = ETH_FCS_LEN - last_frag_sz;
1369 +-
1370 +- /* Find where the other part of the FCS is - Linear or another frag */
1371 +- if (nr_frags == 1) {
1372 +- fcs_p1 = skb_tail_pointer(skb);
1373 +- } else {
1374 +- skb_frag_t *prev_frag = &skb_shinfo(skb)->frags[nr_frags - 2];
1375 +-
1376 +- fcs_p1 = skb_frag_address(prev_frag) +
1377 +- skb_frag_size(prev_frag);
1378 +- }
1379 +- fcs_p1 -= bytes_in_prev;
1380 ++ const void *fcs_bytes;
1381 ++ u32 _fcs_bytes;
1382 +
1383 +- memcpy(&fcs_bytes, fcs_p1, bytes_in_prev);
1384 +- memcpy(((u8 *)&fcs_bytes) + bytes_in_prev, fcs_p2, last_frag_sz);
1385 ++ fcs_bytes = skb_header_pointer(skb, skb->len - ETH_FCS_LEN,
1386 ++ ETH_FCS_LEN, &_fcs_bytes);
1387 +
1388 +- return fcs_bytes;
1389 ++ return __get_unaligned_cpu32(fcs_bytes);
1390 + }
1391 +
1392 + static inline void mlx5e_handle_csum(struct net_device *netdev,
1393 +@@ -762,8 +732,9 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
1394 + network_depth - ETH_HLEN,
1395 + skb->csum);
1396 + if (unlikely(netdev->features & NETIF_F_RXFCS))
1397 +- skb->csum = csum_add(skb->csum,
1398 +- (__force __wsum)mlx5e_get_fcs(skb));
1399 ++ skb->csum = csum_block_add(skb->csum,
1400 ++ (__force __wsum)mlx5e_get_fcs(skb),
1401 ++ skb->len - ETH_FCS_LEN);
1402 + stats->csum_complete++;
1403 + return;
1404 + }
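
Two helpers do the heavy lifting in the new FCS path. skb_header_pointer(skb, offset, len, buf) returns a pointer straight into the skb when the requested bytes are linear and otherwise gathers them from the fragments into the caller's buffer, replacing the hand-rolled frag walking deleted above. And csum_block_add() is used instead of csum_add() because a block folded into a ones'-complement sum from an odd byte offset contributes byte-swapped, so the block's position in the packet has to be supplied. Condensed sketch of the fixed path:

u32 _fcs;
const void *fcs = skb_header_pointer(skb, skb->len - ETH_FCS_LEN,
                                     ETH_FCS_LEN, &_fcs);

skb->csum = csum_block_add(skb->csum,
                           (__force __wsum)__get_unaligned_cpu32(fcs),
                           skb->len - ETH_FCS_LEN); /* offset fixes parity */
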
1405 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
1406 +index f29deb44bf3b..1e774d979c85 100644
1407 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
1408 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
1409 +@@ -287,10 +287,9 @@ dma_unmap_wqe_err:
1410 +
1411 + static inline void mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq,
1412 + struct mlx5_wq_cyc *wq,
1413 +- u16 pi, u16 frag_pi)
1414 ++ u16 pi, u16 nnops)
1415 + {
1416 + struct mlx5e_tx_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi];
1417 +- u8 nnops = mlx5_wq_cyc_get_frag_size(wq) - frag_pi;
1418 +
1419 + edge_wi = wi + nnops;
1420 +
1421 +@@ -345,8 +344,8 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
1422 + struct mlx5e_tx_wqe_info *wi;
1423 +
1424 + struct mlx5e_sq_stats *stats = sq->stats;
1425 ++ u16 headlen, ihs, contig_wqebbs_room;
1426 + u16 ds_cnt, ds_cnt_inl = 0;
1427 +- u16 headlen, ihs, frag_pi;
1428 + u8 num_wqebbs, opcode;
1429 + u32 num_bytes;
1430 + int num_dma;
1431 +@@ -383,9 +382,9 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
1432 + }
1433 +
1434 + num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
1435 +- frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc);
1436 +- if (unlikely(frag_pi + num_wqebbs > mlx5_wq_cyc_get_frag_size(wq))) {
1437 +- mlx5e_fill_sq_frag_edge(sq, wq, pi, frag_pi);
1438 ++ contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
1439 ++ if (unlikely(contig_wqebbs_room < num_wqebbs)) {
1440 ++ mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
1441 + mlx5e_sq_fetch_wqe(sq, &wqe, &pi);
1442 + }
1443 +
1444 +@@ -629,7 +628,7 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
1445 + struct mlx5e_tx_wqe_info *wi;
1446 +
1447 + struct mlx5e_sq_stats *stats = sq->stats;
1448 +- u16 headlen, ihs, pi, frag_pi;
1449 ++ u16 headlen, ihs, pi, contig_wqebbs_room;
1450 + u16 ds_cnt, ds_cnt_inl = 0;
1451 + u8 num_wqebbs, opcode;
1452 + u32 num_bytes;
1453 +@@ -665,13 +664,14 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
1454 + }
1455 +
1456 + num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
1457 +- frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc);
1458 +- if (unlikely(frag_pi + num_wqebbs > mlx5_wq_cyc_get_frag_size(wq))) {
1459 ++ pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
1460 ++ contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
1461 ++ if (unlikely(contig_wqebbs_room < num_wqebbs)) {
1462 ++ mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
1463 + pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
1464 +- mlx5e_fill_sq_frag_edge(sq, wq, pi, frag_pi);
1465 + }
1466 +
1467 +- mlx5i_sq_fetch_wqe(sq, &wqe, &pi);
1468 ++ mlx5i_sq_fetch_wqe(sq, &wqe, pi);
1469 +
1470 + /* fill wqe */
1471 + wi = &sq->db.wqe_info[pi];
1472 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
1473 +index 406c23862f5f..01ccc8201052 100644
1474 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
1475 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
1476 +@@ -269,7 +269,7 @@ static void eq_pf_process(struct mlx5_eq *eq)
1477 + case MLX5_PFAULT_SUBTYPE_WQE:
1478 + /* WQE based event */
1479 + pfault->type =
1480 +- be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24;
1481 ++ (be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24) & 0x7;
1482 + pfault->token =
1483 + be32_to_cpu(pf_eqe->wqe.token);
1484 + pfault->wqe.wq_num =
1485 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
1486 +index 5645a4facad2..b8ee9101c506 100644
1487 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
1488 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
1489 +@@ -245,7 +245,7 @@ static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev,
1490 + return ERR_PTR(res);
1491 + }
1492 +
1493 +- /* Context will be freed by wait func after completion */
1494 ++ /* Context should be freed by the caller after completion. */
1495 + return context;
1496 + }
1497 +
1498 +@@ -418,10 +418,8 @@ static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags)
1499 + cmd.cmd = htonl(MLX5_FPGA_IPSEC_CMD_OP_SET_CAP);
1500 + cmd.flags = htonl(flags);
1501 + context = mlx5_fpga_ipsec_cmd_exec(mdev, &cmd, sizeof(cmd));
1502 +- if (IS_ERR(context)) {
1503 +- err = PTR_ERR(context);
1504 +- goto out;
1505 +- }
1506 ++ if (IS_ERR(context))
1507 ++ return PTR_ERR(context);
1508 +
1509 + err = mlx5_fpga_ipsec_cmd_wait(context);
1510 + if (err)
1511 +@@ -435,6 +433,7 @@ static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags)
1512 + }
1513 +
1514 + out:
1515 ++ kfree(context);
1516 + return err;
1517 + }
1518 +
1519 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
1520 +index 08eac92fc26c..0982c579ec74 100644
1521 +--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
1522 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
1523 +@@ -109,12 +109,11 @@ struct mlx5i_tx_wqe {
1524 +
1525 + static inline void mlx5i_sq_fetch_wqe(struct mlx5e_txqsq *sq,
1526 + struct mlx5i_tx_wqe **wqe,
1527 +- u16 *pi)
1528 ++ u16 pi)
1529 + {
1530 + struct mlx5_wq_cyc *wq = &sq->wq;
1531 +
1532 +- *pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
1533 +- *wqe = mlx5_wq_cyc_get_wqe(wq, *pi);
1534 ++ *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
1535 + memset(*wqe, 0, sizeof(**wqe));
1536 + }
1537 +
1538 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
1539 +index d838af9539b1..9046475c531c 100644
1540 +--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
1541 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
1542 +@@ -39,11 +39,6 @@ u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
1543 + return (u32)wq->fbc.sz_m1 + 1;
1544 + }
1545 +
1546 +-u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq)
1547 +-{
1548 +- return wq->fbc.frag_sz_m1 + 1;
1549 +-}
1550 +-
1551 + u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
1552 + {
1553 + return wq->fbc.sz_m1 + 1;
1554 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
1555 +index 16476cc1a602..311256554520 100644
1556 +--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
1557 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
1558 +@@ -80,7 +80,6 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
1559 + void *wqc, struct mlx5_wq_cyc *wq,
1560 + struct mlx5_wq_ctrl *wq_ctrl);
1561 + u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
1562 +-u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq);
1563 +
1564 + int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
1565 + void *qpc, struct mlx5_wq_qp *wq,
1566 +@@ -140,11 +139,6 @@ static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr)
1567 + return ctr & wq->fbc.sz_m1;
1568 + }
1569 +
1570 +-static inline u16 mlx5_wq_cyc_ctr2fragix(struct mlx5_wq_cyc *wq, u16 ctr)
1571 +-{
1572 +- return ctr & wq->fbc.frag_sz_m1;
1573 +-}
1574 +-
1575 + static inline u16 mlx5_wq_cyc_get_head(struct mlx5_wq_cyc *wq)
1576 + {
1577 + return mlx5_wq_cyc_ctr2ix(wq, wq->wqe_ctr);
1578 +@@ -160,6 +154,11 @@ static inline void *mlx5_wq_cyc_get_wqe(struct mlx5_wq_cyc *wq, u16 ix)
1579 + return mlx5_frag_buf_get_wqe(&wq->fbc, ix);
1580 + }
1581 +
1582 ++static inline u16 mlx5_wq_cyc_get_contig_wqebbs(struct mlx5_wq_cyc *wq, u16 ix)
1583 ++{
1584 ++ return mlx5_frag_buf_get_idx_last_contig_stride(&wq->fbc, ix) - ix + 1;
1585 ++}
1586 ++
1587 + static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2)
1588 + {
1589 + int equal = (cc1 == cc2);
1590 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
1591 +index f9c724752a32..13636a537f37 100644
1592 +--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
1593 ++++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
1594 +@@ -985,8 +985,8 @@ static int mlxsw_devlink_core_bus_device_reload(struct devlink *devlink,
1595 + mlxsw_core->bus,
1596 + mlxsw_core->bus_priv, true,
1597 + devlink);
1598 +- if (err)
1599 +- mlxsw_core->reload_fail = true;
1600 ++ mlxsw_core->reload_fail = !!err;
1601 ++
1602 + return err;
1603 + }
1604 +
1605 +@@ -1126,8 +1126,15 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
1606 + const char *device_kind = mlxsw_core->bus_info->device_kind;
1607 + struct devlink *devlink = priv_to_devlink(mlxsw_core);
1608 +
1609 +- if (mlxsw_core->reload_fail)
1610 +- goto reload_fail;
1611 ++ if (mlxsw_core->reload_fail) {
1612 ++ if (!reload)
1613 ++ /* Only the parts that were not de-initialized in the
1614 ++ * failed reload attempt need to be de-initialized.
1615 ++ */
1616 ++ goto reload_fail_deinit;
1617 ++ else
1618 ++ return;
1619 ++ }
1620 +
1621 + if (mlxsw_core->driver->fini)
1622 + mlxsw_core->driver->fini(mlxsw_core);
1623 +@@ -1140,9 +1147,12 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
1624 + if (!reload)
1625 + devlink_resources_unregister(devlink, NULL);
1626 + mlxsw_core->bus->fini(mlxsw_core->bus_priv);
1627 +- if (reload)
1628 +- return;
1629 +-reload_fail:
1630 ++
1631 ++ return;
1632 ++
1633 ++reload_fail_deinit:
1634 ++ devlink_unregister(devlink);
1635 ++ devlink_resources_unregister(devlink, NULL);
1636 + devlink_free(devlink);
1637 + mlxsw_core_driver_put(device_kind);
1638 + }
1639 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
1640 +index 6cb43dda8232..9883e48d8a21 100644
1641 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
1642 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
1643 +@@ -2307,8 +2307,6 @@ static void mlxsw_sp_switchdev_event_work(struct work_struct *work)
1644 + break;
1645 + case SWITCHDEV_FDB_DEL_TO_DEVICE:
1646 + fdb_info = &switchdev_work->fdb_info;
1647 +- if (!fdb_info->added_by_user)
1648 +- break;
1649 + mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false);
1650 + break;
1651 + case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
1652 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
1653 +index 90a2b53096e2..51bbb0e5b514 100644
1654 +--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
1655 ++++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
1656 +@@ -1710,7 +1710,7 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
1657 +
1658 + cm_info->local_ip[0] = ntohl(iph->daddr);
1659 + cm_info->remote_ip[0] = ntohl(iph->saddr);
1660 +- cm_info->ip_version = TCP_IPV4;
1661 ++ cm_info->ip_version = QED_TCP_IPV4;
1662 +
1663 + ip_hlen = (iph->ihl) * sizeof(u32);
1664 + *payload_len = ntohs(iph->tot_len) - ip_hlen;
1665 +@@ -1730,7 +1730,7 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
1666 + cm_info->remote_ip[i] =
1667 + ntohl(ip6h->saddr.in6_u.u6_addr32[i]);
1668 + }
1669 +- cm_info->ip_version = TCP_IPV6;
1670 ++ cm_info->ip_version = QED_TCP_IPV6;
1671 +
1672 + ip_hlen = sizeof(*ip6h);
1673 + *payload_len = ntohs(ip6h->payload_len);
1674 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
1675 +index b5ce1581645f..79424e6f0976 100644
1676 +--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
1677 ++++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
1678 +@@ -138,23 +138,16 @@ static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
1679 +
1680 + static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
1681 + {
1682 +- enum roce_flavor flavor;
1683 +-
1684 + switch (roce_mode) {
1685 + case ROCE_V1:
1686 +- flavor = PLAIN_ROCE;
1687 +- break;
1688 ++ return PLAIN_ROCE;
1689 + case ROCE_V2_IPV4:
1690 +- flavor = RROCE_IPV4;
1691 +- break;
1692 ++ return RROCE_IPV4;
1693 + case ROCE_V2_IPV6:
1694 +- flavor = ROCE_V2_IPV6;
1695 +- break;
1696 ++ return RROCE_IPV6;
1697 + default:
1698 +- flavor = MAX_ROCE_MODE;
1699 +- break;
1700 ++ return MAX_ROCE_FLAVOR;
1701 + }
1702 +- return flavor;
1703 + }
1704 +
1705 + void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
1706 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
1707 +index 8de644b4721e..77b6248ad3b9 100644
1708 +--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
1709 ++++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
1710 +@@ -154,7 +154,7 @@ qed_set_pf_update_tunn_mode(struct qed_tunnel_info *p_tun,
1711 + static void qed_set_tunn_cls_info(struct qed_tunnel_info *p_tun,
1712 + struct qed_tunnel_info *p_src)
1713 + {
1714 +- enum tunnel_clss type;
1715 ++ int type;
1716 +
1717 + p_tun->b_update_rx_cls = p_src->b_update_rx_cls;
1718 + p_tun->b_update_tx_cls = p_src->b_update_tx_cls;
1719 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
1720 +index be6ddde1a104..c4766e4ac485 100644
1721 +--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
1722 ++++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
1723 +@@ -413,7 +413,6 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
1724 + }
1725 +
1726 + if (!p_iov->b_pre_fp_hsi &&
1727 +- ETH_HSI_VER_MINOR &&
1728 + (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
1729 + DP_INFO(p_hwfn,
1730 + "PF is using older fastpath HSI; %02x.%02x is configured\n",
1731 +@@ -572,7 +571,7 @@ free_p_iov:
1732 + static void
1733 + __qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
1734 + struct qed_tunn_update_type *p_src,
1735 +- enum qed_tunn_clss mask, u8 *p_cls)
1736 ++ enum qed_tunn_mode mask, u8 *p_cls)
1737 + {
1738 + if (p_src->b_update_mode) {
1739 + p_req->tun_mode_update_mask |= BIT(mask);
1740 +@@ -587,7 +586,7 @@ __qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
1741 + static void
1742 + qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
1743 + struct qed_tunn_update_type *p_src,
1744 +- enum qed_tunn_clss mask,
1745 ++ enum qed_tunn_mode mask,
1746 + u8 *p_cls, struct qed_tunn_update_udp_port *p_port,
1747 + u8 *p_update_port, u16 *p_udp_port)
1748 + {
1749 +diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
1750 +index 627c5cd8f786..f18087102d40 100644
1751 +--- a/drivers/net/ethernet/realtek/r8169.c
1752 ++++ b/drivers/net/ethernet/realtek/r8169.c
1753 +@@ -7044,17 +7044,15 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
1754 + struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
1755 + struct net_device *dev = tp->dev;
1756 + u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow;
1757 +- int work_done= 0;
1758 ++ int work_done;
1759 + u16 status;
1760 +
1761 + status = rtl_get_events(tp);
1762 + rtl_ack_events(tp, status & ~tp->event_slow);
1763 +
1764 +- if (status & RTL_EVENT_NAPI_RX)
1765 +- work_done = rtl_rx(dev, tp, (u32) budget);
1766 ++ work_done = rtl_rx(dev, tp, (u32) budget);
1767 +
1768 +- if (status & RTL_EVENT_NAPI_TX)
1769 +- rtl_tx(dev, tp);
1770 ++ rtl_tx(dev, tp);
1771 +
1772 + if (status & tp->event_slow) {
1773 + enable_mask &= ~tp->event_slow;
1774 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
1775 +index 5df1a608e566..541602d70c24 100644
1776 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
1777 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
1778 +@@ -133,7 +133,7 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
1779 + */
1780 + int stmmac_mdio_reset(struct mii_bus *bus)
1781 + {
1782 +-#if defined(CONFIG_STMMAC_PLATFORM)
1783 ++#if IS_ENABLED(CONFIG_STMMAC_PLATFORM)
1784 + struct net_device *ndev = bus->priv;
1785 + struct stmmac_priv *priv = netdev_priv(ndev);
1786 + unsigned int mii_address = priv->hw->mii.addr;
1787 +diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
1788 +index 16ec7af6ab7b..ba9df430fca6 100644
1789 +--- a/drivers/net/hamradio/yam.c
1790 ++++ b/drivers/net/hamradio/yam.c
1791 +@@ -966,6 +966,8 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1792 + sizeof(struct yamdrv_ioctl_mcs));
1793 + if (IS_ERR(ym))
1794 + return PTR_ERR(ym);
1795 ++ if (ym->cmd != SIOCYAMSMCS)
1796 ++ return -EINVAL;
1797 + if (ym->bitrate > YAM_MAXBITRATE) {
1798 + kfree(ym);
1799 + return -EINVAL;
1800 +@@ -981,6 +983,8 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1801 + if (copy_from_user(&yi, ifr->ifr_data, sizeof(struct yamdrv_ioctl_cfg)))
1802 + return -EFAULT;
1803 +
1804 ++ if (yi.cmd != SIOCYAMSCFG)
1805 ++ return -EINVAL;
1806 + if ((yi.cfg.mask & YAM_IOBASE) && netif_running(dev))
1807 + return -EINVAL; /* Cannot change this parameter when up */
1808 + if ((yi.cfg.mask & YAM_IRQ) && netif_running(dev))
1809 +diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
1810 +index e95dd12edec4..023b8d0bf175 100644
1811 +--- a/drivers/net/usb/asix_common.c
1812 ++++ b/drivers/net/usb/asix_common.c
1813 +@@ -607,6 +607,9 @@ int asix_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
1814 + struct usbnet *dev = netdev_priv(net);
1815 + u8 opt = 0;
1816 +
1817 ++ if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
1818 ++ return -EINVAL;
1819 ++
1820 + if (wolinfo->wolopts & WAKE_PHY)
1821 + opt |= AX_MONITOR_LINK;
1822 + if (wolinfo->wolopts & WAKE_MAGIC)
1823 +diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
1824 +index 9e8ad372f419..2207f7a7d1ff 100644
1825 +--- a/drivers/net/usb/ax88179_178a.c
1826 ++++ b/drivers/net/usb/ax88179_178a.c
1827 +@@ -566,6 +566,9 @@ ax88179_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
1828 + struct usbnet *dev = netdev_priv(net);
1829 + u8 opt = 0;
1830 +
1831 ++ if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
1832 ++ return -EINVAL;
1833 ++
1834 + if (wolinfo->wolopts & WAKE_PHY)
1835 + opt |= AX_MONITOR_MODE_RWLC;
1836 + if (wolinfo->wolopts & WAKE_MAGIC)
1837 +diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
1838 +index aeca484a75b8..2bb3a081ff10 100644
1839 +--- a/drivers/net/usb/lan78xx.c
1840 ++++ b/drivers/net/usb/lan78xx.c
1841 +@@ -1401,19 +1401,10 @@ static int lan78xx_set_wol(struct net_device *netdev,
1842 + if (ret < 0)
1843 + return ret;
1844 +
1845 +- pdata->wol = 0;
1846 +- if (wol->wolopts & WAKE_UCAST)
1847 +- pdata->wol |= WAKE_UCAST;
1848 +- if (wol->wolopts & WAKE_MCAST)
1849 +- pdata->wol |= WAKE_MCAST;
1850 +- if (wol->wolopts & WAKE_BCAST)
1851 +- pdata->wol |= WAKE_BCAST;
1852 +- if (wol->wolopts & WAKE_MAGIC)
1853 +- pdata->wol |= WAKE_MAGIC;
1854 +- if (wol->wolopts & WAKE_PHY)
1855 +- pdata->wol |= WAKE_PHY;
1856 +- if (wol->wolopts & WAKE_ARP)
1857 +- pdata->wol |= WAKE_ARP;
1858 ++ if (wol->wolopts & ~WAKE_ALL)
1859 ++ return -EINVAL;
1860 ++
1861 ++ pdata->wol = wol->wolopts;
1862 +
1863 + device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1864 +
1865 +diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
1866 +index 1b07bb5e110d..9a55d75f7f10 100644
1867 +--- a/drivers/net/usb/r8152.c
1868 ++++ b/drivers/net/usb/r8152.c
1869 +@@ -4503,6 +4503,9 @@ static int rtl8152_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1870 + if (!rtl_can_wakeup(tp))
1871 + return -EOPNOTSUPP;
1872 +
1873 ++ if (wol->wolopts & ~WAKE_ANY)
1874 ++ return -EINVAL;
1875 ++
1876 + ret = usb_autopm_get_interface(tp->intf);
1877 + if (ret < 0)
1878 + goto out_set_wol;
1879 +diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
1880 +index b64b1ee56d2d..ec287c9741e8 100644
1881 +--- a/drivers/net/usb/smsc75xx.c
1882 ++++ b/drivers/net/usb/smsc75xx.c
1883 +@@ -731,6 +731,9 @@ static int smsc75xx_ethtool_set_wol(struct net_device *net,
1884 + struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
1885 + int ret;
1886 +
1887 ++ if (wolinfo->wolopts & ~SUPPORTED_WAKE)
1888 ++ return -EINVAL;
1889 ++
1890 + pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;
1891 +
1892 + ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts);
1893 +diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
1894 +index 06b4d290784d..262e7a3c23cb 100644
1895 +--- a/drivers/net/usb/smsc95xx.c
1896 ++++ b/drivers/net/usb/smsc95xx.c
1897 +@@ -774,6 +774,9 @@ static int smsc95xx_ethtool_set_wol(struct net_device *net,
1898 + struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
1899 + int ret;
1900 +
1901 ++ if (wolinfo->wolopts & ~SUPPORTED_WAKE)
1902 ++ return -EINVAL;
1903 ++
1904 + pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;
1905 +
1906 + ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts);
1907 +diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
1908 +index 9277a0f228df..35f39f23d881 100644
1909 +--- a/drivers/net/usb/sr9800.c
1910 ++++ b/drivers/net/usb/sr9800.c
1911 +@@ -421,6 +421,9 @@ sr_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
1912 + struct usbnet *dev = netdev_priv(net);
1913 + u8 opt = 0;
1914 +
1915 ++ if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
1916 ++ return -EINVAL;
1917 ++
1918 + if (wolinfo->wolopts & WAKE_PHY)
1919 + opt |= SR_MONITOR_LINK;
1920 + if (wolinfo->wolopts & WAKE_MAGIC)
1921 +diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
1922 +index 2b6ec927809e..500e2d8f10bc 100644
1923 +--- a/drivers/net/virtio_net.c
1924 ++++ b/drivers/net/virtio_net.c
1925 +@@ -2162,8 +2162,9 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
1926 + /* Make sure no work handler is accessing the device */
1927 + flush_work(&vi->config_work);
1928 +
1929 ++ netif_tx_lock_bh(vi->dev);
1930 + netif_device_detach(vi->dev);
1931 +- netif_tx_disable(vi->dev);
1932 ++ netif_tx_unlock_bh(vi->dev);
1933 + cancel_delayed_work_sync(&vi->refill);
1934 +
1935 + if (netif_running(vi->dev)) {
1936 +@@ -2199,7 +2200,9 @@ static int virtnet_restore_up(struct virtio_device *vdev)
1937 + }
1938 + }
1939 +
1940 ++ netif_tx_lock_bh(vi->dev);
1941 + netif_device_attach(vi->dev);
1942 ++ netif_tx_unlock_bh(vi->dev);
1943 + return err;
1944 + }
1945 +
1946 +diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
1947 +index 80e2c8595c7c..58dd217811c8 100644
1948 +--- a/drivers/net/wireless/mac80211_hwsim.c
1949 ++++ b/drivers/net/wireless/mac80211_hwsim.c
1950 +@@ -519,7 +519,6 @@ struct mac80211_hwsim_data {
1951 + int channels, idx;
1952 + bool use_chanctx;
1953 + bool destroy_on_close;
1954 +- struct work_struct destroy_work;
1955 + u32 portid;
1956 + char alpha2[2];
1957 + const struct ieee80211_regdomain *regd;
1958 +@@ -2812,8 +2811,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
1959 + hwsim_radios_generation++;
1960 + spin_unlock_bh(&hwsim_radio_lock);
1961 +
1962 +- if (idx > 0)
1963 +- hwsim_mcast_new_radio(idx, info, param);
1964 ++ hwsim_mcast_new_radio(idx, info, param);
1965 +
1966 + return idx;
1967 +
1968 +@@ -3442,30 +3440,27 @@ static struct genl_family hwsim_genl_family __ro_after_init = {
1969 + .n_mcgrps = ARRAY_SIZE(hwsim_mcgrps),
1970 + };
1971 +
1972 +-static void destroy_radio(struct work_struct *work)
1973 +-{
1974 +- struct mac80211_hwsim_data *data =
1975 +- container_of(work, struct mac80211_hwsim_data, destroy_work);
1976 +-
1977 +- hwsim_radios_generation++;
1978 +- mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy), NULL);
1979 +-}
1980 +-
1981 + static void remove_user_radios(u32 portid)
1982 + {
1983 + struct mac80211_hwsim_data *entry, *tmp;
1984 ++ LIST_HEAD(list);
1985 +
1986 + spin_lock_bh(&hwsim_radio_lock);
1987 + list_for_each_entry_safe(entry, tmp, &hwsim_radios, list) {
1988 + if (entry->destroy_on_close && entry->portid == portid) {
1989 +- list_del(&entry->list);
1990 ++ list_move(&entry->list, &list);
1991 + rhashtable_remove_fast(&hwsim_radios_rht, &entry->rht,
1992 + hwsim_rht_params);
1993 +- INIT_WORK(&entry->destroy_work, destroy_radio);
1994 +- queue_work(hwsim_wq, &entry->destroy_work);
1995 ++ hwsim_radios_generation++;
1996 + }
1997 + }
1998 + spin_unlock_bh(&hwsim_radio_lock);
1999 ++
2000 ++ list_for_each_entry_safe(entry, tmp, &list, list) {
2001 ++ list_del(&entry->list);
2002 ++ mac80211_hwsim_del_radio(entry, wiphy_name(entry->hw->wiphy),
2003 ++ NULL);
2004 ++ }
2005 + }
2006 +
2007 + static int mac80211_hwsim_netlink_notify(struct notifier_block *nb,
2008 +@@ -3523,6 +3518,7 @@ static __net_init int hwsim_init_net(struct net *net)
2009 + static void __net_exit hwsim_exit_net(struct net *net)
2010 + {
2011 + struct mac80211_hwsim_data *data, *tmp;
2012 ++ LIST_HEAD(list);
2013 +
2014 + spin_lock_bh(&hwsim_radio_lock);
2015 + list_for_each_entry_safe(data, tmp, &hwsim_radios, list) {
2016 +@@ -3533,17 +3529,19 @@ static void __net_exit hwsim_exit_net(struct net *net)
2017 + if (data->netgroup == hwsim_net_get_netgroup(&init_net))
2018 + continue;
2019 +
2020 +- list_del(&data->list);
2021 ++ list_move(&data->list, &list);
2022 + rhashtable_remove_fast(&hwsim_radios_rht, &data->rht,
2023 + hwsim_rht_params);
2024 + hwsim_radios_generation++;
2025 +- spin_unlock_bh(&hwsim_radio_lock);
2026 ++ }
2027 ++ spin_unlock_bh(&hwsim_radio_lock);
2028 ++
2029 ++ list_for_each_entry_safe(data, tmp, &list, list) {
2030 ++ list_del(&data->list);
2031 + mac80211_hwsim_del_radio(data,
2032 + wiphy_name(data->hw->wiphy),
2033 + NULL);
2034 +- spin_lock_bh(&hwsim_radio_lock);
2035 + }
2036 +- spin_unlock_bh(&hwsim_radio_lock);
2037 +
2038 + ida_simple_remove(&hwsim_netgroup_ida, hwsim_net_get_netgroup(net));
2039 + }
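
[Editor's note: both mac80211_hwsim hunks above apply the same locking shape:
entries are unlinked onto a private list while the spinlock is held, and the
teardown, which may sleep or flush work, runs only after the lock is dropped.
A generic sketch of that pattern; struct demo_radio, demo_match() and
demo_destroy() are hypothetical stand-ins.]

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct demo_radio {
            struct list_head list;
            /* ... */
    };

    void demo_destroy(struct demo_radio *r);    /* hypothetical teardown */

    static void demo_remove_matching(struct list_head *radios,
                                     spinlock_t *lock,
                                     bool (*demo_match)(struct demo_radio *))
    {
            struct demo_radio *entry, *tmp;
            LIST_HEAD(doomed);

            spin_lock_bh(lock);
            list_for_each_entry_safe(entry, tmp, radios, list)
                    if (demo_match(entry))
                            list_move(&entry->list, &doomed);
            spin_unlock_bh(lock);

            /* Lock dropped: teardown may now sleep safely. */
            list_for_each_entry_safe(entry, tmp, &doomed, list) {
                    list_del(&entry->list);
                    demo_destroy(entry);
            }
    }
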
2040 +diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c
2041 +index 43743c26c071..39bf85d0ade0 100644
2042 +--- a/drivers/net/wireless/marvell/libertas/if_sdio.c
2043 ++++ b/drivers/net/wireless/marvell/libertas/if_sdio.c
2044 +@@ -1317,6 +1317,10 @@ static int if_sdio_suspend(struct device *dev)
2045 + if (priv->wol_criteria == EHS_REMOVE_WAKEUP) {
2046 + dev_info(dev, "Suspend without wake params -- powering down card\n");
2047 + if (priv->fw_ready) {
2048 ++ ret = lbs_suspend(priv);
2049 ++ if (ret)
2050 ++ return ret;
2051 ++
2052 + priv->power_up_on_resume = true;
2053 + if_sdio_power_off(card);
2054 + }
2055 +diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
2056 +index 3e18a68c2b03..054e66d93ed6 100644
2057 +--- a/drivers/scsi/qedi/qedi_main.c
2058 ++++ b/drivers/scsi/qedi/qedi_main.c
2059 +@@ -2472,6 +2472,7 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
2060 + /* start qedi context */
2061 + spin_lock_init(&qedi->hba_lock);
2062 + spin_lock_init(&qedi->task_idx_lock);
2063 ++ mutex_init(&qedi->stats_lock);
2064 + }
2065 + qedi_ops->ll2->register_cb_ops(qedi->cdev, &qedi_ll2_cb_ops, qedi);
2066 + qedi_ops->ll2->start(qedi->cdev, &params);
2067 +diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
2068 +index ecb22749df0b..8cc015183043 100644
2069 +--- a/drivers/soc/fsl/qbman/qman.c
2070 ++++ b/drivers/soc/fsl/qbman/qman.c
2071 +@@ -2729,6 +2729,9 @@ static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt)
2072 + {
2073 + unsigned long addr;
2074 +
2075 ++ if (!p)
2076 ++ return -ENODEV;
2077 ++
2078 + addr = gen_pool_alloc(p, cnt);
2079 + if (!addr)
2080 + return -ENOMEM;
2081 +diff --git a/drivers/soc/fsl/qe/ucc.c b/drivers/soc/fsl/qe/ucc.c
2082 +index c646d8713861..681f7d4b7724 100644
2083 +--- a/drivers/soc/fsl/qe/ucc.c
2084 ++++ b/drivers/soc/fsl/qe/ucc.c
2085 +@@ -626,7 +626,7 @@ static u32 ucc_get_tdm_sync_shift(enum comm_dir mode, u32 tdm_num)
2086 + {
2087 + u32 shift;
2088 +
2089 +- shift = (mode == COMM_DIR_RX) ? RX_SYNC_SHIFT_BASE : RX_SYNC_SHIFT_BASE;
2090 ++ shift = (mode == COMM_DIR_RX) ? RX_SYNC_SHIFT_BASE : TX_SYNC_SHIFT_BASE;
2091 + shift -= tdm_num * 2;
2092 +
2093 + return shift;
2094 +diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
2095 +index 500911f16498..5bad9fdec5f8 100644
2096 +--- a/drivers/thunderbolt/icm.c
2097 ++++ b/drivers/thunderbolt/icm.c
2098 +@@ -653,14 +653,6 @@ icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
2099 + bool approved;
2100 + u64 route;
2101 +
2102 +- /*
2103 +- * After NVM upgrade adding root switch device fails because we
2104 +- * initiated reset. During that time ICM might still send
2105 +- * XDomain connected message which we ignore here.
2106 +- */
2107 +- if (!tb->root_switch)
2108 +- return;
2109 +-
2110 + link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
2111 + depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
2112 + ICM_LINK_INFO_DEPTH_SHIFT;
2113 +@@ -950,14 +942,6 @@ icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
2114 + if (pkg->hdr.packet_id)
2115 + return;
2116 +
2117 +- /*
2118 +- * After NVM upgrade adding root switch device fails because we
2119 +- * initiated reset. During that time ICM might still send device
2120 +- * connected message which we ignore here.
2121 +- */
2122 +- if (!tb->root_switch)
2123 +- return;
2124 +-
2125 + route = get_route(pkg->route_hi, pkg->route_lo);
2126 + authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
2127 + security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
2128 +@@ -1317,19 +1301,26 @@ static void icm_handle_notification(struct work_struct *work)
2129 +
2130 + mutex_lock(&tb->lock);
2131 +
2132 +- switch (n->pkg->code) {
2133 +- case ICM_EVENT_DEVICE_CONNECTED:
2134 +- icm->device_connected(tb, n->pkg);
2135 +- break;
2136 +- case ICM_EVENT_DEVICE_DISCONNECTED:
2137 +- icm->device_disconnected(tb, n->pkg);
2138 +- break;
2139 +- case ICM_EVENT_XDOMAIN_CONNECTED:
2140 +- icm->xdomain_connected(tb, n->pkg);
2141 +- break;
2142 +- case ICM_EVENT_XDOMAIN_DISCONNECTED:
2143 +- icm->xdomain_disconnected(tb, n->pkg);
2144 +- break;
2145 ++ /*
2146 ++ * When the domain is stopped we flush its workqueue but before
2147 ++ * that the root switch is removed. In that case we should treat
2148 ++ * the queued events as being canceled.
2149 ++ */
2150 ++ if (tb->root_switch) {
2151 ++ switch (n->pkg->code) {
2152 ++ case ICM_EVENT_DEVICE_CONNECTED:
2153 ++ icm->device_connected(tb, n->pkg);
2154 ++ break;
2155 ++ case ICM_EVENT_DEVICE_DISCONNECTED:
2156 ++ icm->device_disconnected(tb, n->pkg);
2157 ++ break;
2158 ++ case ICM_EVENT_XDOMAIN_CONNECTED:
2159 ++ icm->xdomain_connected(tb, n->pkg);
2160 ++ break;
2161 ++ case ICM_EVENT_XDOMAIN_DISCONNECTED:
2162 ++ icm->xdomain_disconnected(tb, n->pkg);
2163 ++ break;
2164 ++ }
2165 + }
2166 +
2167 + mutex_unlock(&tb->lock);
2168 +diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
2169 +index f5a33e88e676..2d042150e41c 100644
2170 +--- a/drivers/thunderbolt/nhi.c
2171 ++++ b/drivers/thunderbolt/nhi.c
2172 +@@ -1147,5 +1147,5 @@ static void __exit nhi_unload(void)
2173 + tb_domain_exit();
2174 + }
2175 +
2176 +-fs_initcall(nhi_init);
2177 ++rootfs_initcall(nhi_init);
2178 + module_exit(nhi_unload);
2179 +diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
2180 +index af842000188c..a25f6ea5c784 100644
2181 +--- a/drivers/tty/serial/8250/8250_dw.c
2182 ++++ b/drivers/tty/serial/8250/8250_dw.c
2183 +@@ -576,10 +576,6 @@ static int dw8250_probe(struct platform_device *pdev)
2184 + if (!data->skip_autocfg)
2185 + dw8250_setup_port(p);
2186 +
2187 +-#ifdef CONFIG_PM
2188 +- uart.capabilities |= UART_CAP_RPM;
2189 +-#endif
2190 +-
2191 + /* If we have a valid fifosize, try hooking up DMA */
2192 + if (p->fifosize) {
2193 + data->dma.rxconf.src_maxburst = p->fifosize / 4;
2194 +diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
2195 +index 560ed8711706..c4424cbd9943 100644
2196 +--- a/drivers/vhost/vhost.c
2197 ++++ b/drivers/vhost/vhost.c
2198 +@@ -30,6 +30,7 @@
2199 + #include <linux/sched/mm.h>
2200 + #include <linux/sched/signal.h>
2201 + #include <linux/interval_tree_generic.h>
2202 ++#include <linux/nospec.h>
2203 +
2204 + #include "vhost.h"
2205 +
2206 +@@ -1362,6 +1363,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
2207 + if (idx >= d->nvqs)
2208 + return -ENOBUFS;
2209 +
2210 ++ idx = array_index_nospec(idx, d->nvqs);
2211 + vq = d->vqs[idx];
2212 +
2213 + mutex_lock(&vq->mutex);
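
[Editor's note: the vhost hunk above is the standard Spectre-v1 hardening
pattern: a bounds check alone can be bypassed under speculation, so the index
is clamped with array_index_nospec() before the dependent array load. In
isolation, with struct demo_table as a hypothetical container:]

    #include <linux/nospec.h>

    struct demo_table {
            unsigned int nr;
            void **slots;
    };

    static void *demo_lookup(struct demo_table *t, unsigned int idx)
    {
            if (idx >= t->nr)               /* architectural check */
                    return NULL;
            /* Clamped to [0, t->nr) even under misspeculation. */
            idx = array_index_nospec(idx, t->nr);
            return t->slots[idx];           /* safe dependent load */
    }
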
2214 +diff --git a/drivers/video/fbdev/pxa168fb.c b/drivers/video/fbdev/pxa168fb.c
2215 +index def3a501acd6..d059d04c63ac 100644
2216 +--- a/drivers/video/fbdev/pxa168fb.c
2217 ++++ b/drivers/video/fbdev/pxa168fb.c
2218 +@@ -712,7 +712,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
2219 + /*
2220 + * enable controller clock
2221 + */
2222 +- clk_enable(fbi->clk);
2223 ++ clk_prepare_enable(fbi->clk);
2224 +
2225 + pxa168fb_set_par(info);
2226 +
2227 +@@ -767,7 +767,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
2228 + failed_free_cmap:
2229 + fb_dealloc_cmap(&info->cmap);
2230 + failed_free_clk:
2231 +- clk_disable(fbi->clk);
2232 ++ clk_disable_unprepare(fbi->clk);
2233 + failed_free_fbmem:
2234 + dma_free_coherent(fbi->dev, info->fix.smem_len,
2235 + info->screen_base, fbi->fb_start_dma);
2236 +@@ -807,7 +807,7 @@ static int pxa168fb_remove(struct platform_device *pdev)
2237 + dma_free_wc(fbi->dev, PAGE_ALIGN(info->fix.smem_len),
2238 + info->screen_base, info->fix.smem_start);
2239 +
2240 +- clk_disable(fbi->clk);
2241 ++ clk_disable_unprepare(fbi->clk);
2242 +
2243 + framebuffer_release(info);
2244 +
2245 +diff --git a/fs/afs/cell.c b/fs/afs/cell.c
2246 +index f3d0bef16d78..6127f0fcd62c 100644
2247 +--- a/fs/afs/cell.c
2248 ++++ b/fs/afs/cell.c
2249 +@@ -514,6 +514,8 @@ static int afs_alloc_anon_key(struct afs_cell *cell)
2250 + */
2251 + static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell)
2252 + {
2253 ++ struct hlist_node **p;
2254 ++ struct afs_cell *pcell;
2255 + int ret;
2256 +
2257 + if (!cell->anonymous_key) {
2258 +@@ -534,7 +536,18 @@ static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell)
2259 + return ret;
2260 +
2261 + mutex_lock(&net->proc_cells_lock);
2262 +- list_add_tail(&cell->proc_link, &net->proc_cells);
2263 ++ for (p = &net->proc_cells.first; *p; p = &(*p)->next) {
2264 ++ pcell = hlist_entry(*p, struct afs_cell, proc_link);
2265 ++ if (strcmp(cell->name, pcell->name) < 0)
2266 ++ break;
2267 ++ }
2268 ++
2269 ++ cell->proc_link.pprev = p;
2270 ++ cell->proc_link.next = *p;
2271 ++ rcu_assign_pointer(*p, &cell->proc_link.next);
2272 ++ if (cell->proc_link.next)
2273 ++ cell->proc_link.next->pprev = &cell->proc_link.next;
2274 ++
2275 + afs_dynroot_mkdir(net, cell);
2276 + mutex_unlock(&net->proc_cells_lock);
2277 + return 0;
2278 +@@ -550,7 +563,7 @@ static void afs_deactivate_cell(struct afs_net *net, struct afs_cell *cell)
2279 + afs_proc_cell_remove(cell);
2280 +
2281 + mutex_lock(&net->proc_cells_lock);
2282 +- list_del_init(&cell->proc_link);
2283 ++ hlist_del_rcu(&cell->proc_link);
2284 + afs_dynroot_rmdir(net, cell);
2285 + mutex_unlock(&net->proc_cells_lock);
2286 +
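
[Editor's note: the afs_activate_cell() hunk above open-codes a sorted insert
into an RCU-traversed hlist so the /proc cell listing stays alphabetised. The
same result can be expressed with the stock rculist helpers; a sketch under the
assumption that writers are serialised by proc_cells_lock, with struct
demo_cell as a hypothetical stand-in:]

    #include <linux/rculist.h>
    #include <linux/string.h>

    struct demo_cell {
            char name[64];
            struct hlist_node link;
    };

    static void demo_insert_sorted(struct hlist_head *head,
                                   struct demo_cell *cell)
    {
            struct demo_cell *pos, *last = NULL;

            hlist_for_each_entry(pos, head, link) {
                    if (strcmp(cell->name, pos->name) < 0) {
                            hlist_add_before_rcu(&cell->link, &pos->link);
                            return;
                    }
                    last = pos;
            }
            if (last)       /* ran off the end: append */
                    hlist_add_behind_rcu(&cell->link, &last->link);
            else            /* empty list */
                    hlist_add_head_rcu(&cell->link, head);
    }
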
2287 +diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
2288 +index 174e843f0633..7de7223843cc 100644
2289 +--- a/fs/afs/dynroot.c
2290 ++++ b/fs/afs/dynroot.c
2291 +@@ -286,7 +286,7 @@ int afs_dynroot_populate(struct super_block *sb)
2292 + return -ERESTARTSYS;
2293 +
2294 + net->dynroot_sb = sb;
2295 +- list_for_each_entry(cell, &net->proc_cells, proc_link) {
2296 ++ hlist_for_each_entry(cell, &net->proc_cells, proc_link) {
2297 + ret = afs_dynroot_mkdir(net, cell);
2298 + if (ret < 0)
2299 + goto error;
2300 +diff --git a/fs/afs/internal.h b/fs/afs/internal.h
2301 +index 9778df135717..270d1caa27c6 100644
2302 +--- a/fs/afs/internal.h
2303 ++++ b/fs/afs/internal.h
2304 +@@ -241,7 +241,7 @@ struct afs_net {
2305 + seqlock_t cells_lock;
2306 +
2307 + struct mutex proc_cells_lock;
2308 +- struct list_head proc_cells;
2309 ++ struct hlist_head proc_cells;
2310 +
2311 + /* Known servers. Theoretically each fileserver can only be in one
2312 + * cell, but in practice, people create aliases and subsets and there's
2313 +@@ -319,7 +319,7 @@ struct afs_cell {
2314 + struct afs_net *net;
2315 + struct key *anonymous_key; /* anonymous user key for this cell */
2316 + struct work_struct manager; /* Manager for init/deinit/dns */
2317 +- struct list_head proc_link; /* /proc cell list link */
2318 ++ struct hlist_node proc_link; /* /proc cell list link */
2319 + #ifdef CONFIG_AFS_FSCACHE
2320 + struct fscache_cookie *cache; /* caching cookie */
2321 + #endif
2322 +diff --git a/fs/afs/main.c b/fs/afs/main.c
2323 +index e84fe822a960..107427688edd 100644
2324 +--- a/fs/afs/main.c
2325 ++++ b/fs/afs/main.c
2326 +@@ -87,7 +87,7 @@ static int __net_init afs_net_init(struct net *net_ns)
2327 + timer_setup(&net->cells_timer, afs_cells_timer, 0);
2328 +
2329 + mutex_init(&net->proc_cells_lock);
2330 +- INIT_LIST_HEAD(&net->proc_cells);
2331 ++ INIT_HLIST_HEAD(&net->proc_cells);
2332 +
2333 + seqlock_init(&net->fs_lock);
2334 + net->fs_servers = RB_ROOT;
2335 +diff --git a/fs/afs/proc.c b/fs/afs/proc.c
2336 +index 476dcbb79713..9101f62707af 100644
2337 +--- a/fs/afs/proc.c
2338 ++++ b/fs/afs/proc.c
2339 +@@ -33,9 +33,8 @@ static inline struct afs_net *afs_seq2net_single(struct seq_file *m)
2340 + static int afs_proc_cells_show(struct seq_file *m, void *v)
2341 + {
2342 + struct afs_cell *cell = list_entry(v, struct afs_cell, proc_link);
2343 +- struct afs_net *net = afs_seq2net(m);
2344 +
2345 +- if (v == &net->proc_cells) {
2346 ++ if (v == SEQ_START_TOKEN) {
2347 + /* display header on line 1 */
2348 + seq_puts(m, "USE NAME\n");
2349 + return 0;
2350 +@@ -50,12 +49,12 @@ static void *afs_proc_cells_start(struct seq_file *m, loff_t *_pos)
2351 + __acquires(rcu)
2352 + {
2353 + rcu_read_lock();
2354 +- return seq_list_start_head(&afs_seq2net(m)->proc_cells, *_pos);
2355 ++ return seq_hlist_start_head_rcu(&afs_seq2net(m)->proc_cells, *_pos);
2356 + }
2357 +
2358 + static void *afs_proc_cells_next(struct seq_file *m, void *v, loff_t *pos)
2359 + {
2360 +- return seq_list_next(v, &afs_seq2net(m)->proc_cells, pos);
2361 ++ return seq_hlist_next_rcu(v, &afs_seq2net(m)->proc_cells, pos);
2362 + }
2363 +
2364 + static void afs_proc_cells_stop(struct seq_file *m, void *v)
2365 +diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
2366 +index 3aef8630a4b9..95d2c716e0da 100644
2367 +--- a/fs/fat/fatent.c
2368 ++++ b/fs/fat/fatent.c
2369 +@@ -681,6 +681,7 @@ int fat_count_free_clusters(struct super_block *sb)
2370 + if (ops->ent_get(&fatent) == FAT_ENT_FREE)
2371 + free++;
2372 + } while (fat_ent_next(sbi, &fatent));
2373 ++ cond_resched();
2374 + }
2375 + sbi->free_clusters = free;
2376 + sbi->free_clus_valid = 1;
2377 +diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
2378 +index 7869622af22a..7a5ee145c733 100644
2379 +--- a/fs/ocfs2/refcounttree.c
2380 ++++ b/fs/ocfs2/refcounttree.c
2381 +@@ -2946,6 +2946,7 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
2382 + if (map_end & (PAGE_SIZE - 1))
2383 + to = map_end & (PAGE_SIZE - 1);
2384 +
2385 ++retry:
2386 + page = find_or_create_page(mapping, page_index, GFP_NOFS);
2387 + if (!page) {
2388 + ret = -ENOMEM;
2389 +@@ -2954,11 +2955,18 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
2390 + }
2391 +
2392 + /*
2393 +- * In case PAGE_SIZE <= CLUSTER_SIZE, This page
2394 +- * can't be dirtied before we CoW it out.
2395 ++ * In case PAGE_SIZE <= CLUSTER_SIZE, we do not expect a dirty
2396 ++ * page, so write it back.
2397 + */
2398 +- if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize)
2399 +- BUG_ON(PageDirty(page));
2400 ++ if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize) {
2401 ++ if (PageDirty(page)) {
2402 ++ /*
2403 ++ * write_one_page will unlock the page on return
2404 ++ */
2405 ++ ret = write_one_page(page);
2406 ++ goto retry;
2407 ++ }
2408 ++ }
2409 +
2410 + if (!PageUptodate(page)) {
2411 + ret = block_read_full_page(page, ocfs2_get_block);
2412 +diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
2413 +index e373e2e10f6a..83b930988e21 100644
2414 +--- a/include/asm-generic/vmlinux.lds.h
2415 ++++ b/include/asm-generic/vmlinux.lds.h
2416 +@@ -70,7 +70,7 @@
2417 + */
2418 + #ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
2419 + #define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
2420 +-#define DATA_MAIN .data .data.[0-9a-zA-Z_]*
2421 ++#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..LPBX*
2422 + #define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]*
2423 + #define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]*
2424 + #define BSS_MAIN .bss .bss.[0-9a-zA-Z_]*
2425 +@@ -617,8 +617,8 @@
2426 +
2427 + #define EXIT_DATA \
2428 + *(.exit.data .exit.data.*) \
2429 +- *(.fini_array) \
2430 +- *(.dtors) \
2431 ++ *(.fini_array .fini_array.*) \
2432 ++ *(.dtors .dtors.*) \
2433 + MEM_DISCARD(exit.data*) \
2434 + MEM_DISCARD(exit.rodata*)
2435 +
2436 +diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
2437 +index a8ba6b04152c..55e4be8b016b 100644
2438 +--- a/include/linux/compiler_types.h
2439 ++++ b/include/linux/compiler_types.h
2440 +@@ -78,6 +78,18 @@ extern void __chk_io_ptr(const volatile void __iomem *);
2441 + #include <linux/compiler-clang.h>
2442 + #endif
2443 +
2444 ++/*
2445 ++ * Some architectures need to provide custom definitions of macros provided
2446 ++ * by linux/compiler-*.h, and can do so using asm/compiler.h. We include that
2447 ++ * conditionally rather than using an asm-generic wrapper in order to avoid
2448 ++ * build failures if any C compilation, which will include this file via an
2449 ++ * -include argument in c_flags, occurs prior to the asm-generic wrappers being
2450 ++ * generated.
2451 ++ */
2452 ++#ifdef CONFIG_HAVE_ARCH_COMPILER_H
2453 ++#include <asm/compiler.h>
2454 ++#endif
2455 ++
2456 + /*
2457 + * Generic compiler-dependent macros required for kernel
2458 + * build go below this comment. Actual compiler/compiler version
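
[Editor's note: the Kconfig and compiler_types.h hunks above add an opt-in
hook: an architecture that selects HAVE_ARCH_COMPILER_H gets its asm/compiler.h
pulled in after the linux/compiler-*.h headers, so its definitions take
precedence. A hypothetical arch header using the hook; the override shown is
illustrative only:]

    /* arch/demo/include/asm/compiler.h -- included because the arch
     * Kconfig contains "select HAVE_ARCH_COMPILER_H". Redefinitions
     * here land after linux/compiler-gcc.h et al., so they win.
     */
    #ifndef _ASM_DEMO_COMPILER_H
    #define _ASM_DEMO_COMPILER_H

    #undef barrier_data
    #define barrier_data(ptr) \
            __asm__ __volatile__("" : : "r"(ptr) : "memory")

    #endif /* _ASM_DEMO_COMPILER_H */
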
2459 +diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
2460 +index 5382b5183b7e..82a953ec5ef0 100644
2461 +--- a/include/linux/gpio/driver.h
2462 ++++ b/include/linux/gpio/driver.h
2463 +@@ -94,6 +94,13 @@ struct gpio_irq_chip {
2464 + */
2465 + unsigned int num_parents;
2466 +
2467 ++ /**
2468 ++ * @parent_irq:
2469 ++ *
2470 ++ * For use by gpiochip_set_cascaded_irqchip()
2471 ++ */
2472 ++ unsigned int parent_irq;
2473 ++
2474 + /**
2475 + * @parents:
2476 + *
2477 +diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
2478 +index 64f450593b54..b49bfc8e68b0 100644
2479 +--- a/include/linux/mlx5/driver.h
2480 ++++ b/include/linux/mlx5/driver.h
2481 +@@ -1022,6 +1022,14 @@ static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
2482 + ((fbc->frag_sz_m1 & ix) << fbc->log_stride);
2483 + }
2484 +
2485 ++static inline u32
2486 ++mlx5_frag_buf_get_idx_last_contig_stride(struct mlx5_frag_buf_ctrl *fbc, u32 ix)
2487 ++{
2488 ++ u32 last_frag_stride_idx = (ix + fbc->strides_offset) | fbc->frag_sz_m1;
2489 ++
2490 ++ return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1);
2491 ++}
2492 ++
2493 + int mlx5_cmd_init(struct mlx5_core_dev *dev);
2494 + void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
2495 + void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
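
[Editor's note: mlx5_frag_buf_get_idx_last_contig_stride() above relies on a
mask trick: when each fragment holds a power-of-two number of strides,
ix | frag_sz_m1 rounds ix up to the last stride of its own fragment. Combined
with the wq.h helper this yields the contiguous-room count directly. A
simplified sketch, with strides_offset dropped for clarity (an assumption):]

    #include <linux/kernel.h>

    /* frag_sz_m1 = 7 (8 strides per fragment), sz_m1 = 31:
     *   ix = 5 -> last = 5 | 7 = 7,  contig = 7 - 5 + 1 = 3
     *   ix = 8 -> last = 15,         contig = 8
     */
    static inline u16 demo_contig_strides(u16 ix, u16 frag_sz_m1,
                                          u16 sz_m1)
    {
            u16 last = min_t(u16, ix | frag_sz_m1, sz_m1);

            return last - ix + 1;
    }
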
2496 +diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
2497 +index dd2052f0efb7..11b7b8ab0696 100644
2498 +--- a/include/linux/netfilter.h
2499 ++++ b/include/linux/netfilter.h
2500 +@@ -215,6 +215,8 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
2501 + break;
2502 + case NFPROTO_ARP:
2503 + #ifdef CONFIG_NETFILTER_FAMILY_ARP
2504 ++ if (WARN_ON_ONCE(hook >= ARRAY_SIZE(net->nf.hooks_arp)))
2505 ++ break;
2506 + hook_head = rcu_dereference(net->nf.hooks_arp[hook]);
2507 + #endif
2508 + break;
2509 +diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
2510 +index 3d4930528db0..2d31e22babd8 100644
2511 +--- a/include/net/ip6_fib.h
2512 ++++ b/include/net/ip6_fib.h
2513 +@@ -159,6 +159,10 @@ struct fib6_info {
2514 + struct rt6_info * __percpu *rt6i_pcpu;
2515 + struct rt6_exception_bucket __rcu *rt6i_exception_bucket;
2516 +
2517 ++#ifdef CONFIG_IPV6_ROUTER_PREF
2518 ++ unsigned long last_probe;
2519 ++#endif
2520 ++
2521 + u32 fib6_metric;
2522 + u8 fib6_protocol;
2523 + u8 fib6_type;
2524 +diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
2525 +index 5ef1bad81ef5..9e3d32746430 100644
2526 +--- a/include/net/sctp/sm.h
2527 ++++ b/include/net/sctp/sm.h
2528 +@@ -347,7 +347,7 @@ static inline __u16 sctp_data_size(struct sctp_chunk *chunk)
2529 + __u16 size;
2530 +
2531 + size = ntohs(chunk->chunk_hdr->length);
2532 +- size -= sctp_datahdr_len(&chunk->asoc->stream);
2533 ++ size -= sctp_datachk_len(&chunk->asoc->stream);
2534 +
2535 + return size;
2536 + }
2537 +diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
2538 +index 4fff00e9da8a..0a774b64fc29 100644
2539 +--- a/include/trace/events/rxrpc.h
2540 ++++ b/include/trace/events/rxrpc.h
2541 +@@ -56,7 +56,6 @@ enum rxrpc_peer_trace {
2542 + rxrpc_peer_new,
2543 + rxrpc_peer_processing,
2544 + rxrpc_peer_put,
2545 +- rxrpc_peer_queued_error,
2546 + };
2547 +
2548 + enum rxrpc_conn_trace {
2549 +@@ -257,8 +256,7 @@ enum rxrpc_tx_fail_trace {
2550 + EM(rxrpc_peer_got, "GOT") \
2551 + EM(rxrpc_peer_new, "NEW") \
2552 + EM(rxrpc_peer_processing, "PRO") \
2553 +- EM(rxrpc_peer_put, "PUT") \
2554 +- E_(rxrpc_peer_queued_error, "QER")
2555 ++ E_(rxrpc_peer_put, "PUT")
2556 +
2557 + #define rxrpc_conn_traces \
2558 + EM(rxrpc_conn_got, "GOT") \
2559 +diff --git a/kernel/events/core.c b/kernel/events/core.c
2560 +index ae22d93701db..fc072b7f839d 100644
2561 +--- a/kernel/events/core.c
2562 ++++ b/kernel/events/core.c
2563 +@@ -8319,6 +8319,8 @@ void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
2564 + goto unlock;
2565 +
2566 + list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
2567 ++ if (event->cpu != smp_processor_id())
2568 ++ continue;
2569 + if (event->attr.type != PERF_TYPE_TRACEPOINT)
2570 + continue;
2571 + if (event->attr.config != entry->type)
2572 +@@ -9436,9 +9438,7 @@ static void free_pmu_context(struct pmu *pmu)
2573 + if (pmu->task_ctx_nr > perf_invalid_context)
2574 + return;
2575 +
2576 +- mutex_lock(&pmus_lock);
2577 + free_percpu(pmu->pmu_cpu_context);
2578 +- mutex_unlock(&pmus_lock);
2579 + }
2580 +
2581 + /*
2582 +@@ -9694,12 +9694,8 @@ EXPORT_SYMBOL_GPL(perf_pmu_register);
2583 +
2584 + void perf_pmu_unregister(struct pmu *pmu)
2585 + {
2586 +- int remove_device;
2587 +-
2588 + mutex_lock(&pmus_lock);
2589 +- remove_device = pmu_bus_running;
2590 + list_del_rcu(&pmu->entry);
2591 +- mutex_unlock(&pmus_lock);
2592 +
2593 + /*
2594 + * We dereference the pmu list under both SRCU and regular RCU, so
2595 +@@ -9711,13 +9707,14 @@ void perf_pmu_unregister(struct pmu *pmu)
2596 + free_percpu(pmu->pmu_disable_count);
2597 + if (pmu->type >= PERF_TYPE_MAX)
2598 + idr_remove(&pmu_idr, pmu->type);
2599 +- if (remove_device) {
2600 ++ if (pmu_bus_running) {
2601 + if (pmu->nr_addr_filters)
2602 + device_remove_file(pmu->dev, &dev_attr_nr_addr_filters);
2603 + device_del(pmu->dev);
2604 + put_device(pmu->dev);
2605 + }
2606 + free_pmu_context(pmu);
2607 ++ mutex_unlock(&pmus_lock);
2608 + }
2609 + EXPORT_SYMBOL_GPL(perf_pmu_unregister);
2610 +
2611 +diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c
2612 +index 0e4cd64ad2c0..654977862b06 100644
2613 +--- a/kernel/locking/test-ww_mutex.c
2614 ++++ b/kernel/locking/test-ww_mutex.c
2615 +@@ -260,7 +260,7 @@ static void test_cycle_work(struct work_struct *work)
2616 + {
2617 + struct test_cycle *cycle = container_of(work, typeof(*cycle), work);
2618 + struct ww_acquire_ctx ctx;
2619 +- int err;
2620 ++ int err, erra = 0;
2621 +
2622 + ww_acquire_init(&ctx, &ww_class);
2623 + ww_mutex_lock(&cycle->a_mutex, &ctx);
2624 +@@ -270,17 +270,19 @@ static void test_cycle_work(struct work_struct *work)
2625 +
2626 + err = ww_mutex_lock(cycle->b_mutex, &ctx);
2627 + if (err == -EDEADLK) {
2628 ++ err = 0;
2629 + ww_mutex_unlock(&cycle->a_mutex);
2630 + ww_mutex_lock_slow(cycle->b_mutex, &ctx);
2631 +- err = ww_mutex_lock(&cycle->a_mutex, &ctx);
2632 ++ erra = ww_mutex_lock(&cycle->a_mutex, &ctx);
2633 + }
2634 +
2635 + if (!err)
2636 + ww_mutex_unlock(cycle->b_mutex);
2637 +- ww_mutex_unlock(&cycle->a_mutex);
2638 ++ if (!erra)
2639 ++ ww_mutex_unlock(&cycle->a_mutex);
2640 + ww_acquire_fini(&ctx);
2641 +
2642 +- cycle->result = err;
2643 ++ cycle->result = err ?: erra;
2644 + }
2645 +
2646 + static int __test_cycle(unsigned int nthreads)
2647 +diff --git a/mm/gup_benchmark.c b/mm/gup_benchmark.c
2648 +index 6a473709e9b6..7405c9d89d65 100644
2649 +--- a/mm/gup_benchmark.c
2650 ++++ b/mm/gup_benchmark.c
2651 +@@ -19,7 +19,8 @@ static int __gup_benchmark_ioctl(unsigned int cmd,
2652 + struct gup_benchmark *gup)
2653 + {
2654 + ktime_t start_time, end_time;
2655 +- unsigned long i, nr, nr_pages, addr, next;
2656 ++ unsigned long i, nr_pages, addr, next;
2657 ++ int nr;
2658 + struct page **pages;
2659 +
2660 + nr_pages = gup->size / PAGE_SIZE;
2661 +diff --git a/mm/migrate.c b/mm/migrate.c
2662 +index 2a55289ee9f1..f49eb9589d73 100644
2663 +--- a/mm/migrate.c
2664 ++++ b/mm/migrate.c
2665 +@@ -1415,7 +1415,7 @@ retry:
2666 + * we encounter them after the rest of the list
2667 + * is processed.
2668 + */
2669 +- if (PageTransHuge(page)) {
2670 ++ if (PageTransHuge(page) && !PageHuge(page)) {
2671 + lock_page(page);
2672 + rc = split_huge_page_to_list(page, from);
2673 + unlock_page(page);
2674 +diff --git a/mm/vmscan.c b/mm/vmscan.c
2675 +index fc0436407471..03822f86f288 100644
2676 +--- a/mm/vmscan.c
2677 ++++ b/mm/vmscan.c
2678 +@@ -386,17 +386,6 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
2679 + delta = freeable >> priority;
2680 + delta *= 4;
2681 + do_div(delta, shrinker->seeks);
2682 +-
2683 +- /*
2684 +- * Make sure we apply some minimal pressure on default priority
2685 +- * even on small cgroups. Stale objects are not only consuming memory
2686 +- * by themselves, but can also hold a reference to a dying cgroup,
2687 +- * preventing it from being reclaimed. A dying cgroup with all
2688 +- * corresponding structures like per-cpu stats and kmem caches
2689 +- * can be really big, so it may lead to a significant waste of memory.
2690 +- */
2691 +- delta = max_t(unsigned long long, delta, min(freeable, batch_size));
2692 +-
2693 + total_scan += delta;
2694 + if (total_scan < 0) {
2695 + pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",
2696 +diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
2697 +index 8a80d48d89c4..1b9984f653dd 100644
2698 +--- a/net/bluetooth/mgmt.c
2699 ++++ b/net/bluetooth/mgmt.c
2700 +@@ -2298,9 +2298,8 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2701 + /* LE address type */
2702 + addr_type = le_addr_type(cp->addr.type);
2703 +
2704 +- hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2705 +-
2706 +- err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
2707 ++ /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
2708 ++ err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
2709 + if (err < 0) {
2710 + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2711 + MGMT_STATUS_NOT_PAIRED, &rp,
2712 +@@ -2314,8 +2313,6 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2713 + goto done;
2714 + }
2715 +
2716 +- /* Abort any ongoing SMP pairing */
2717 +- smp_cancel_pairing(conn);
2718 +
2719 + /* Defer clearing up the connection parameters until closing to
2720 + * give a chance of keeping them if a re-pairing happens.
2721 +diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
2722 +index 3a7b0773536b..73f7211d0431 100644
2723 +--- a/net/bluetooth/smp.c
2724 ++++ b/net/bluetooth/smp.c
2725 +@@ -2422,30 +2422,51 @@ unlock:
2726 + return ret;
2727 + }
2728 +
2729 +-void smp_cancel_pairing(struct hci_conn *hcon)
2730 ++int smp_cancel_and_remove_pairing(struct hci_dev *hdev, bdaddr_t *bdaddr,
2731 ++ u8 addr_type)
2732 + {
2733 +- struct l2cap_conn *conn = hcon->l2cap_data;
2734 ++ struct hci_conn *hcon;
2735 ++ struct l2cap_conn *conn;
2736 + struct l2cap_chan *chan;
2737 + struct smp_chan *smp;
2738 ++ int err;
2739 ++
2740 ++ err = hci_remove_ltk(hdev, bdaddr, addr_type);
2741 ++ hci_remove_irk(hdev, bdaddr, addr_type);
2742 ++
2743 ++ hcon = hci_conn_hash_lookup_le(hdev, bdaddr, addr_type);
2744 ++ if (!hcon)
2745 ++ goto done;
2746 +
2747 ++ conn = hcon->l2cap_data;
2748 + if (!conn)
2749 +- return;
2750 ++ goto done;
2751 +
2752 + chan = conn->smp;
2753 + if (!chan)
2754 +- return;
2755 ++ goto done;
2756 +
2757 + l2cap_chan_lock(chan);
2758 +
2759 + smp = chan->data;
2760 + if (smp) {
2761 ++ /* Set keys to NULL to make sure smp_failure() does not try to
2762 ++ * remove and free already invalidated rcu list entries. */
2763 ++ smp->ltk = NULL;
2764 ++ smp->slave_ltk = NULL;
2765 ++ smp->remote_irk = NULL;
2766 ++
2767 + if (test_bit(SMP_FLAG_COMPLETE, &smp->flags))
2768 + smp_failure(conn, 0);
2769 + else
2770 + smp_failure(conn, SMP_UNSPECIFIED);
2771 ++ err = 0;
2772 + }
2773 +
2774 + l2cap_chan_unlock(chan);
2775 ++
2776 ++done:
2777 ++ return err;
2778 + }
2779 +
2780 + static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb)
2781 +diff --git a/net/bluetooth/smp.h b/net/bluetooth/smp.h
2782 +index 0ff6247eaa6c..121edadd5f8d 100644
2783 +--- a/net/bluetooth/smp.h
2784 ++++ b/net/bluetooth/smp.h
2785 +@@ -181,7 +181,8 @@ enum smp_key_pref {
2786 + };
2787 +
2788 + /* SMP Commands */
2789 +-void smp_cancel_pairing(struct hci_conn *hcon);
2790 ++int smp_cancel_and_remove_pairing(struct hci_dev *hdev, bdaddr_t *bdaddr,
2791 ++ u8 addr_type);
2792 + bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level,
2793 + enum smp_key_pref key_pref);
2794 + int smp_conn_security(struct hci_conn *hcon, __u8 sec_level);
2795 +diff --git a/net/bpfilter/bpfilter_kern.c b/net/bpfilter/bpfilter_kern.c
2796 +index f0fc182d3db7..d5dd6b8b4248 100644
2797 +--- a/net/bpfilter/bpfilter_kern.c
2798 ++++ b/net/bpfilter/bpfilter_kern.c
2799 +@@ -23,9 +23,11 @@ static void shutdown_umh(struct umh_info *info)
2800 +
2801 + if (!info->pid)
2802 + return;
2803 +- tsk = pid_task(find_vpid(info->pid), PIDTYPE_PID);
2804 +- if (tsk)
2805 ++ tsk = get_pid_task(find_vpid(info->pid), PIDTYPE_PID);
2806 ++ if (tsk) {
2807 + force_sig(SIGKILL, tsk);
2808 ++ put_task_struct(tsk);
2809 ++ }
2810 + fput(info->pipe_to_umh);
2811 + fput(info->pipe_from_umh);
2812 + info->pid = 0;
2813 +diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
2814 +index 920665dd92db..6059a47f5e0c 100644
2815 +--- a/net/bridge/br_multicast.c
2816 ++++ b/net/bridge/br_multicast.c
2817 +@@ -1420,7 +1420,14 @@ static void br_multicast_query_received(struct net_bridge *br,
2818 + return;
2819 +
2820 + br_multicast_update_query_timer(br, query, max_delay);
2821 +- br_multicast_mark_router(br, port);
2822 ++
2823 ++ /* Based on RFC4541, section 2.1.1 IGMP Forwarding Rules,
2824 ++ * the arrival port for IGMP Queries where the source address
2825 ++ * is 0.0.0.0 should not be added to router port list.
2826 ++ * is 0.0.0.0 should not be added to the router port list.
2827 ++ if ((saddr->proto == htons(ETH_P_IP) && saddr->u.ip4) ||
2828 ++ saddr->proto == htons(ETH_P_IPV6))
2829 ++ br_multicast_mark_router(br, port);
2830 + }
2831 +
2832 + static int br_ip4_multicast_query(struct net_bridge *br,
2833 +diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
2834 +index 9b16eaf33819..58240cc185e7 100644
2835 +--- a/net/bridge/br_netfilter_hooks.c
2836 ++++ b/net/bridge/br_netfilter_hooks.c
2837 +@@ -834,7 +834,8 @@ static unsigned int ip_sabotage_in(void *priv,
2838 + struct sk_buff *skb,
2839 + const struct nf_hook_state *state)
2840 + {
2841 +- if (skb->nf_bridge && !skb->nf_bridge->in_prerouting) {
2842 ++ if (skb->nf_bridge && !skb->nf_bridge->in_prerouting &&
2843 ++ !netif_is_l3_master(skb->dev)) {
2844 + state->okfn(state->net, state->sk, skb);
2845 + return NF_STOLEN;
2846 + }
2847 +diff --git a/net/core/datagram.c b/net/core/datagram.c
2848 +index 9938952c5c78..16f0eb0970c4 100644
2849 +--- a/net/core/datagram.c
2850 ++++ b/net/core/datagram.c
2851 +@@ -808,8 +808,9 @@ int skb_copy_and_csum_datagram_msg(struct sk_buff *skb,
2852 + return -EINVAL;
2853 + }
2854 +
2855 +- if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
2856 +- netdev_rx_csum_fault(skb->dev);
2857 ++ if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
2858 ++ !skb->csum_complete_sw)
2859 ++ netdev_rx_csum_fault(NULL);
2860 + }
2861 + return 0;
2862 + fault:
2863 +diff --git a/net/core/ethtool.c b/net/core/ethtool.c
2864 +index 6c04f1bf377d..548d0e615bc7 100644
2865 +--- a/net/core/ethtool.c
2866 ++++ b/net/core/ethtool.c
2867 +@@ -2461,13 +2461,17 @@ roll_back:
2868 + return ret;
2869 + }
2870 +
2871 +-static int ethtool_set_per_queue(struct net_device *dev, void __user *useraddr)
2872 ++static int ethtool_set_per_queue(struct net_device *dev,
2873 ++ void __user *useraddr, u32 sub_cmd)
2874 + {
2875 + struct ethtool_per_queue_op per_queue_opt;
2876 +
2877 + if (copy_from_user(&per_queue_opt, useraddr, sizeof(per_queue_opt)))
2878 + return -EFAULT;
2879 +
2880 ++ if (per_queue_opt.sub_command != sub_cmd)
2881 ++ return -EINVAL;
2882 ++
2883 + switch (per_queue_opt.sub_command) {
2884 + case ETHTOOL_GCOALESCE:
2885 + return ethtool_get_per_queue_coalesce(dev, useraddr, &per_queue_opt);
2886 +@@ -2838,7 +2842,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
2887 + rc = ethtool_get_phy_stats(dev, useraddr);
2888 + break;
2889 + case ETHTOOL_PERQUEUE:
2890 +- rc = ethtool_set_per_queue(dev, useraddr);
2891 ++ rc = ethtool_set_per_queue(dev, useraddr, sub_cmd);
2892 + break;
2893 + case ETHTOOL_GLINKSETTINGS:
2894 + rc = ethtool_get_link_ksettings(dev, useraddr);
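
[Editor's note: the ethtool_set_per_queue() hunk closes a double-fetch hole:
dev_ethtool() has already read sub_command once to route the call, so the
struct copied from user space must be re-checked against that value, otherwise
a racing writer could switch sub-commands between the two reads. The general
shape, with demo_ names as hypothetical stand-ins:]

    #include <linux/types.h>
    #include <linux/uaccess.h>

    struct demo_op {
            u32 cmd;
            /* ... payload ... */
    };

    static int demo_ioctl(void __user *useraddr, u32 routed_cmd)
    {
            struct demo_op op;

            if (copy_from_user(&op, useraddr, sizeof(op)))
                    return -EFAULT;
            if (op.cmd != routed_cmd)   /* reject a racing rewrite */
                    return -EINVAL;
            /* ... dispatch on op.cmd, now known to match ... */
            return 0;
    }
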
2895 +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
2896 +index 18de39dbdc30..4b25fd14bc5a 100644
2897 +--- a/net/core/rtnetlink.c
2898 ++++ b/net/core/rtnetlink.c
2899 +@@ -3480,6 +3480,11 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
2900 + return -EINVAL;
2901 + }
2902 +
2903 ++ if (dev->type != ARPHRD_ETHER) {
2904 ++ NL_SET_ERR_MSG(extack, "FDB delete only supported for Ethernet devices");
2905 ++ return -EINVAL;
2906 ++ }
2907 ++
2908 + addr = nla_data(tb[NDA_LLADDR]);
2909 +
2910 + err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
2911 +@@ -3584,6 +3589,11 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
2912 + return -EINVAL;
2913 + }
2914 +
2915 ++ if (dev->type != ARPHRD_ETHER) {
2916 ++ NL_SET_ERR_MSG(extack, "FDB add only supported for Ethernet devices");
2917 ++ return -EINVAL;
2918 ++ }
2919 ++
2920 + addr = nla_data(tb[NDA_LLADDR]);
2921 +
2922 + err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
2923 +diff --git a/net/core/skbuff.c b/net/core/skbuff.c
2924 +index 3680912f056a..c45916b91a9c 100644
2925 +--- a/net/core/skbuff.c
2926 ++++ b/net/core/skbuff.c
2927 +@@ -1845,8 +1845,9 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
2928 + if (skb->ip_summed == CHECKSUM_COMPLETE) {
2929 + int delta = skb->len - len;
2930 +
2931 +- skb->csum = csum_sub(skb->csum,
2932 +- skb_checksum(skb, len, delta, 0));
2933 ++ skb->csum = csum_block_sub(skb->csum,
2934 ++ skb_checksum(skb, len, delta, 0),
2935 ++ len);
2936 + }
2937 + return __pskb_trim(skb, len);
2938 + }
2939 +diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
2940 +index d14d741fb05e..9d3bdce1ad8a 100644
2941 +--- a/net/ipv4/ip_fragment.c
2942 ++++ b/net/ipv4/ip_fragment.c
2943 +@@ -657,10 +657,14 @@ struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
2944 + if (ip_is_fragment(&iph)) {
2945 + skb = skb_share_check(skb, GFP_ATOMIC);
2946 + if (skb) {
2947 +- if (!pskb_may_pull(skb, netoff + iph.ihl * 4))
2948 +- return skb;
2949 +- if (pskb_trim_rcsum(skb, netoff + len))
2950 +- return skb;
2951 ++ if (!pskb_may_pull(skb, netoff + iph.ihl * 4)) {
2952 ++ kfree_skb(skb);
2953 ++ return NULL;
2954 ++ }
2955 ++ if (pskb_trim_rcsum(skb, netoff + len)) {
2956 ++ kfree_skb(skb);
2957 ++ return NULL;
2958 ++ }
2959 + memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
2960 + if (ip_defrag(net, skb, user))
2961 + return NULL;
2962 +diff --git a/net/ipv4/ipmr_base.c b/net/ipv4/ipmr_base.c
2963 +index cafb0506c8c9..33be09791c74 100644
2964 +--- a/net/ipv4/ipmr_base.c
2965 ++++ b/net/ipv4/ipmr_base.c
2966 +@@ -295,8 +295,6 @@ int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
2967 + next_entry:
2968 + e++;
2969 + }
2970 +- e = 0;
2971 +- s_e = 0;
2972 +
2973 + spin_lock_bh(lock);
2974 + list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
2975 +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
2976 +index a12df801de94..2fe7e2713350 100644
2977 +--- a/net/ipv4/udp.c
2978 ++++ b/net/ipv4/udp.c
2979 +@@ -2124,8 +2124,24 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
2980 + /* Note, we are only interested in != 0 or == 0, thus the
2981 + * force to int.
2982 + */
2983 +- return (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
2984 +- inet_compute_pseudo);
2985 ++ err = (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
2986 ++ inet_compute_pseudo);
2987 ++ if (err)
2988 ++ return err;
2989 ++
2990 ++ if (skb->ip_summed == CHECKSUM_COMPLETE && !skb->csum_valid) {
2991 ++ /* If SW calculated the value, we know it's bad */
2992 ++ if (skb->csum_complete_sw)
2993 ++ return 1;
2994 ++
2995 ++ /* HW says the value is bad. Let's validate that.
2996 ++ * skb->csum is no longer the full packet checksum,
2997 ++ * so don't treat it as such.
2998 ++ */
2999 ++ skb_checksum_complete_unset(skb);
3000 ++ }
3001 ++
3002 ++ return 0;
3003 + }
3004 +
3005 + /* wrapper for udp_queue_rcv_skb taking care of csum conversion and
3006 +diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
3007 +index bcfc00e88756..f8de2482a529 100644
3008 +--- a/net/ipv4/xfrm4_input.c
3009 ++++ b/net/ipv4/xfrm4_input.c
3010 +@@ -67,6 +67,7 @@ int xfrm4_transport_finish(struct sk_buff *skb, int async)
3011 +
3012 + if (xo && (xo->flags & XFRM_GRO)) {
3013 + skb_mac_header_rebuild(skb);
3014 ++ skb_reset_transport_header(skb);
3015 + return 0;
3016 + }
3017 +
3018 +diff --git a/net/ipv4/xfrm4_mode_transport.c b/net/ipv4/xfrm4_mode_transport.c
3019 +index 3d36644890bb..1ad2c2c4e250 100644
3020 +--- a/net/ipv4/xfrm4_mode_transport.c
3021 ++++ b/net/ipv4/xfrm4_mode_transport.c
3022 +@@ -46,7 +46,6 @@ static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb)
3023 + static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
3024 + {
3025 + int ihl = skb->data - skb_transport_header(skb);
3026 +- struct xfrm_offload *xo = xfrm_offload(skb);
3027 +
3028 + if (skb->transport_header != skb->network_header) {
3029 + memmove(skb_transport_header(skb),
3030 +@@ -54,8 +53,7 @@ static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
3031 + skb->network_header = skb->transport_header;
3032 + }
3033 + ip_hdr(skb)->tot_len = htons(skb->len + ihl);
3034 +- if (!xo || !(xo->flags & XFRM_GRO))
3035 +- skb_reset_transport_header(skb);
3036 ++ skb_reset_transport_header(skb);
3037 + return 0;
3038 + }
3039 +
3040 +diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
3041 +index 3484c7020fd9..ac3de1aa1cd3 100644
3042 +--- a/net/ipv6/addrconf.c
3043 ++++ b/net/ipv6/addrconf.c
3044 +@@ -4930,8 +4930,8 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
3045 +
3046 + /* unicast address incl. temp addr */
3047 + list_for_each_entry(ifa, &idev->addr_list, if_list) {
3048 +- if (++ip_idx < s_ip_idx)
3049 +- continue;
3050 ++ if (ip_idx < s_ip_idx)
3051 ++ goto next;
3052 + err = inet6_fill_ifaddr(skb, ifa,
3053 + NETLINK_CB(cb->skb).portid,
3054 + cb->nlh->nlmsg_seq,
3055 +@@ -4940,6 +4940,8 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
3056 + if (err < 0)
3057 + break;
3058 + nl_dump_check_consistent(cb, nlmsg_hdr(skb));
3059 ++next:
3060 ++ ip_idx++;
3061 + }
3062 + break;
3063 + }
3064 +diff --git a/net/ipv6/ip6_checksum.c b/net/ipv6/ip6_checksum.c
3065 +index 547515e8450a..377717045f8f 100644
3066 +--- a/net/ipv6/ip6_checksum.c
3067 ++++ b/net/ipv6/ip6_checksum.c
3068 +@@ -88,8 +88,24 @@ int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto)
3069 + * Note, we are only interested in != 0 or == 0, thus the
3070 + * force to int.
3071 + */
3072 +- return (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
3073 +- ip6_compute_pseudo);
3074 ++ err = (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
3075 ++ ip6_compute_pseudo);
3076 ++ if (err)
3077 ++ return err;
3078 ++
3079 ++ if (skb->ip_summed == CHECKSUM_COMPLETE && !skb->csum_valid) {
3080 ++ /* If SW calculated the value, we know it's bad */
3081 ++ if (skb->csum_complete_sw)
3082 ++ return 1;
3083 ++
3084 ++ /* HW says the value is bad. Let's validate that.
3085 ++ * skb->csum is no longer the full packet checksum,
3086 ++ * so don't treat it as such.
3087 ++ */
3088 ++ skb_checksum_complete_unset(skb);
3089 ++ }
3090 ++
3091 ++ return 0;
3092 + }
3093 + EXPORT_SYMBOL(udp6_csum_init);
3094 +
3095 +diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
3096 +index f5b5b0574a2d..009b508127e6 100644
3097 +--- a/net/ipv6/ip6_tunnel.c
3098 ++++ b/net/ipv6/ip6_tunnel.c
3099 +@@ -1184,10 +1184,6 @@ route_lookup:
3100 + }
3101 + skb_dst_set(skb, dst);
3102 +
3103 +- if (encap_limit >= 0) {
3104 +- init_tel_txopt(&opt, encap_limit);
3105 +- ipv6_push_frag_opts(skb, &opt.ops, &proto);
3106 +- }
3107 + hop_limit = hop_limit ? : ip6_dst_hoplimit(dst);
3108 +
3109 + /* Calculate max headroom for all the headers and adjust
3110 +@@ -1202,6 +1198,11 @@ route_lookup:
3111 + if (err)
3112 + return err;
3113 +
3114 ++ if (encap_limit >= 0) {
3115 ++ init_tel_txopt(&opt, encap_limit);
3116 ++ ipv6_push_frag_opts(skb, &opt.ops, &proto);
3117 ++ }
3118 ++
3119 + skb_push(skb, sizeof(struct ipv6hdr));
3120 + skb_reset_network_header(skb);
3121 + ipv6h = ipv6_hdr(skb);
3122 +diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
3123 +index f60f310785fd..131440ea6b51 100644
3124 +--- a/net/ipv6/mcast.c
3125 ++++ b/net/ipv6/mcast.c
3126 +@@ -2436,17 +2436,17 @@ static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
3127 + {
3128 + int err;
3129 +
3130 +- /* callers have the socket lock and rtnl lock
3131 +- * so no other readers or writers of iml or its sflist
3132 +- */
3133 ++ write_lock_bh(&iml->sflock);
3134 + if (!iml->sflist) {
3135 + /* any-source empty exclude case */
3136 +- return ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0);
3137 ++ err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0);
3138 ++ } else {
3139 ++ err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode,
3140 ++ iml->sflist->sl_count, iml->sflist->sl_addr, 0);
3141 ++ sock_kfree_s(sk, iml->sflist, IP6_SFLSIZE(iml->sflist->sl_max));
3142 ++ iml->sflist = NULL;
3143 + }
3144 +- err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode,
3145 +- iml->sflist->sl_count, iml->sflist->sl_addr, 0);
3146 +- sock_kfree_s(sk, iml->sflist, IP6_SFLSIZE(iml->sflist->sl_max));
3147 +- iml->sflist = NULL;
3148 ++ write_unlock_bh(&iml->sflock);
3149 + return err;
3150 + }
3151 +
3152 +diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
3153 +index 0ec273997d1d..673a4a932f2a 100644
3154 +--- a/net/ipv6/ndisc.c
3155 ++++ b/net/ipv6/ndisc.c
3156 +@@ -1732,10 +1732,9 @@ int ndisc_rcv(struct sk_buff *skb)
3157 + return 0;
3158 + }
3159 +
3160 +- memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));
3161 +-
3162 + switch (msg->icmph.icmp6_type) {
3163 + case NDISC_NEIGHBOUR_SOLICITATION:
3164 ++ memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));
3165 + ndisc_recv_ns(skb);
3166 + break;
3167 +
3168 +diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
3169 +index e4d9e6976d3c..a452d99c9f52 100644
3170 +--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
3171 ++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
3172 +@@ -585,8 +585,6 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
3173 + fq->q.meat == fq->q.len &&
3174 + nf_ct_frag6_reasm(fq, skb, dev))
3175 + ret = 0;
3176 +- else
3177 +- skb_dst_drop(skb);
3178 +
3179 + out_unlock:
3180 + spin_unlock_bh(&fq->q.lock);
3181 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
3182 +index ed526e257da6..a243d5249b51 100644
3183 +--- a/net/ipv6/route.c
3184 ++++ b/net/ipv6/route.c
3185 +@@ -517,10 +517,11 @@ static void rt6_probe_deferred(struct work_struct *w)
3186 +
3187 + static void rt6_probe(struct fib6_info *rt)
3188 + {
3189 +- struct __rt6_probe_work *work;
3190 ++ struct __rt6_probe_work *work = NULL;
3191 + const struct in6_addr *nh_gw;
3192 + struct neighbour *neigh;
3193 + struct net_device *dev;
3194 ++ struct inet6_dev *idev;
3195 +
3196 + /*
3197 + * Okay, this does not seem to be appropriate
3198 +@@ -536,15 +537,12 @@ static void rt6_probe(struct fib6_info *rt)
3199 + nh_gw = &rt->fib6_nh.nh_gw;
3200 + dev = rt->fib6_nh.nh_dev;
3201 + rcu_read_lock_bh();
3202 ++ idev = __in6_dev_get(dev);
3203 + neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);
3204 + if (neigh) {
3205 +- struct inet6_dev *idev;
3206 +-
3207 + if (neigh->nud_state & NUD_VALID)
3208 + goto out;
3209 +
3210 +- idev = __in6_dev_get(dev);
3211 +- work = NULL;
3212 + write_lock(&neigh->lock);
3213 + if (!(neigh->nud_state & NUD_VALID) &&
3214 + time_after(jiffies,
3215 +@@ -554,11 +552,13 @@ static void rt6_probe(struct fib6_info *rt)
3216 + __neigh_set_probe_once(neigh);
3217 + }
3218 + write_unlock(&neigh->lock);
3219 +- } else {
3220 ++ } else if (time_after(jiffies, rt->last_probe +
3221 ++ idev->cnf.rtr_probe_interval)) {
3222 + work = kmalloc(sizeof(*work), GFP_ATOMIC);
3223 + }
3224 +
3225 + if (work) {
3226 ++ rt->last_probe = jiffies;
3227 + INIT_WORK(&work->work, rt6_probe_deferred);
3228 + work->target = *nh_gw;
3229 + dev_hold(dev);
3230 +@@ -2792,6 +2792,8 @@ static int ip6_route_check_nh_onlink(struct net *net,
3231 + grt = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0);
3232 + if (grt) {
3233 + if (!grt->dst.error &&
3234 ++ /* ignore match if it is the default route */
3235 ++ grt->from && !ipv6_addr_any(&grt->from->fib6_dst.addr) &&
3236 + (grt->rt6i_flags & flags || dev != grt->dst.dev)) {
3237 + NL_SET_ERR_MSG(extack,
3238 + "Nexthop has invalid gateway or device mismatch");
3239 +diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
3240 +index 39d0cab919bb..4f2c7a196365 100644
3241 +--- a/net/ipv6/udp.c
3242 ++++ b/net/ipv6/udp.c
3243 +@@ -762,11 +762,9 @@ static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
3244 +
3245 + ret = udpv6_queue_rcv_skb(sk, skb);
3246 +
3247 +- /* a return value > 0 means to resubmit the input, but
3248 +- * it wants the return to be -protocol, or 0
3249 +- */
3250 ++ /* a return value > 0 means to resubmit the input */
3251 + if (ret > 0)
3252 +- return -ret;
3253 ++ return ret;
3254 + return 0;
3255 + }
3256 +
3257 +diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
3258 +index 841f4a07438e..9ef490dddcea 100644
3259 +--- a/net/ipv6/xfrm6_input.c
3260 ++++ b/net/ipv6/xfrm6_input.c
3261 +@@ -59,6 +59,7 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async)
3262 +
3263 + if (xo && (xo->flags & XFRM_GRO)) {
3264 + skb_mac_header_rebuild(skb);
3265 ++ skb_reset_transport_header(skb);
3266 + return -1;
3267 + }
3268 +
3269 +diff --git a/net/ipv6/xfrm6_mode_transport.c b/net/ipv6/xfrm6_mode_transport.c
3270 +index 9ad07a91708e..3c29da5defe6 100644
3271 +--- a/net/ipv6/xfrm6_mode_transport.c
3272 ++++ b/net/ipv6/xfrm6_mode_transport.c
3273 +@@ -51,7 +51,6 @@ static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb)
3274 + static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
3275 + {
3276 + int ihl = skb->data - skb_transport_header(skb);
3277 +- struct xfrm_offload *xo = xfrm_offload(skb);
3278 +
3279 + if (skb->transport_header != skb->network_header) {
3280 + memmove(skb_transport_header(skb),
3281 +@@ -60,8 +59,7 @@ static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
3282 + }
3283 + ipv6_hdr(skb)->payload_len = htons(skb->len + ihl -
3284 + sizeof(struct ipv6hdr));
3285 +- if (!xo || !(xo->flags & XFRM_GRO))
3286 +- skb_reset_transport_header(skb);
3287 ++ skb_reset_transport_header(skb);
3288 + return 0;
3289 + }
3290 +
3291 +diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
3292 +index 5959ce9620eb..6a74080005cf 100644
3293 +--- a/net/ipv6/xfrm6_output.c
3294 ++++ b/net/ipv6/xfrm6_output.c
3295 +@@ -170,9 +170,11 @@ static int __xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
3296 +
3297 + if (toobig && xfrm6_local_dontfrag(skb)) {
3298 + xfrm6_local_rxpmtu(skb, mtu);
3299 ++ kfree_skb(skb);
3300 + return -EMSGSIZE;
3301 + } else if (!skb->ignore_df && toobig && skb->sk) {
3302 + xfrm_local_error(skb, mtu);
3303 ++ kfree_skb(skb);
3304 + return -EMSGSIZE;
3305 + }
3306 +
3307 +diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
3308 +index c0ac522b48a1..4ff89cb7c86f 100644
3309 +--- a/net/llc/llc_conn.c
3310 ++++ b/net/llc/llc_conn.c
3311 +@@ -734,6 +734,7 @@ void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk)
3312 + llc_sk(sk)->sap = sap;
3313 +
3314 + spin_lock_bh(&sap->sk_lock);
3315 ++ sock_set_flag(sk, SOCK_RCU_FREE);
3316 + sap->sk_count++;
3317 + sk_nulls_add_node_rcu(sk, laddr_hb);
3318 + hlist_add_head(&llc->dev_hash_node, dev_hb);
3319 +diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
3320 +index ee56f18cad3f..21526630bf65 100644
3321 +--- a/net/mac80211/mesh.h
3322 ++++ b/net/mac80211/mesh.h
3323 +@@ -217,7 +217,8 @@ void mesh_rmc_free(struct ieee80211_sub_if_data *sdata);
3324 + int mesh_rmc_init(struct ieee80211_sub_if_data *sdata);
3325 + void ieee80211s_init(void);
3326 + void ieee80211s_update_metric(struct ieee80211_local *local,
3327 +- struct sta_info *sta, struct sk_buff *skb);
3328 ++ struct sta_info *sta,
3329 ++ struct ieee80211_tx_status *st);
3330 + void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata);
3331 + void ieee80211_mesh_teardown_sdata(struct ieee80211_sub_if_data *sdata);
3332 + int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata);
3333 +diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
3334 +index daf9db3c8f24..6950cd0bf594 100644
3335 +--- a/net/mac80211/mesh_hwmp.c
3336 ++++ b/net/mac80211/mesh_hwmp.c
3337 +@@ -295,15 +295,12 @@ int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
3338 + }
3339 +
3340 + void ieee80211s_update_metric(struct ieee80211_local *local,
3341 +- struct sta_info *sta, struct sk_buff *skb)
3342 ++ struct sta_info *sta,
3343 ++ struct ieee80211_tx_status *st)
3344 + {
3345 +- struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
3346 +- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
3347 ++ struct ieee80211_tx_info *txinfo = st->info;
3348 + int failed;
3349 +
3350 +- if (!ieee80211_is_data(hdr->frame_control))
3351 +- return;
3352 +-
3353 + failed = !(txinfo->flags & IEEE80211_TX_STAT_ACK);
3354 +
3355 + /* moving average, scaled to 100.
3356 +diff --git a/net/mac80211/status.c b/net/mac80211/status.c
3357 +index 9a6d7208bf4f..91d7c0cd1882 100644
3358 +--- a/net/mac80211/status.c
3359 ++++ b/net/mac80211/status.c
3360 +@@ -479,11 +479,6 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local,
3361 + if (!skb)
3362 + return;
3363 +
3364 +- if (dropped) {
3365 +- dev_kfree_skb_any(skb);
3366 +- return;
3367 +- }
3368 +-
3369 + if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) {
3370 + u64 cookie = IEEE80211_SKB_CB(skb)->ack.cookie;
3371 + struct ieee80211_sub_if_data *sdata;
3372 +@@ -506,6 +501,8 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local,
3373 + }
3374 + rcu_read_unlock();
3375 +
3376 ++ dev_kfree_skb_any(skb);
3377 ++ } else if (dropped) {
3378 + dev_kfree_skb_any(skb);
3379 + } else {
3380 + /* consumes skb */
3381 +@@ -811,7 +808,7 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
3382 +
3383 + rate_control_tx_status(local, sband, status);
3384 + if (ieee80211_vif_is_mesh(&sta->sdata->vif))
3385 +- ieee80211s_update_metric(local, sta, skb);
3386 ++ ieee80211s_update_metric(local, sta, status);
3387 +
3388 + if (!(info->flags & IEEE80211_TX_CTL_INJECTED) && acked)
3389 + ieee80211_frame_acked(sta, skb);
3390 +@@ -972,6 +969,8 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
3391 + }
3392 +
3393 + rate_control_tx_status(local, sband, status);
3394 ++ if (ieee80211_vif_is_mesh(&sta->sdata->vif))
3395 ++ ieee80211s_update_metric(local, sta, status);
3396 + }
3397 +
3398 + if (acked || noack_success) {
3399 +diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
3400 +index 5cd5e6e5834e..6c647f425e05 100644
3401 +--- a/net/mac80211/tdls.c
3402 ++++ b/net/mac80211/tdls.c
3403 +@@ -16,6 +16,7 @@
3404 + #include "ieee80211_i.h"
3405 + #include "driver-ops.h"
3406 + #include "rate.h"
3407 ++#include "wme.h"
3408 +
3409 + /* give usermode some time for retries in setting up the TDLS session */
3410 + #define TDLS_PEER_SETUP_TIMEOUT (15 * HZ)
3411 +@@ -1010,14 +1011,13 @@ ieee80211_tdls_prep_mgmt_packet(struct wiphy *wiphy, struct net_device *dev,
3412 + switch (action_code) {
3413 + case WLAN_TDLS_SETUP_REQUEST:
3414 + case WLAN_TDLS_SETUP_RESPONSE:
3415 +- skb_set_queue_mapping(skb, IEEE80211_AC_BK);
3416 +- skb->priority = 2;
3417 ++ skb->priority = 256 + 2;
3418 + break;
3419 + default:
3420 +- skb_set_queue_mapping(skb, IEEE80211_AC_VI);
3421 +- skb->priority = 5;
3422 ++ skb->priority = 256 + 5;
3423 + break;
3424 + }
3425 ++ skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, skb));
3426 +
3427 + /*
3428 + * Set the WLAN_TDLS_TEARDOWN flag to indicate a teardown in progress.
3429 +diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
3430 +index 9b3b069e418a..361f2f6cc839 100644
3431 +--- a/net/mac80211/tx.c
3432 ++++ b/net/mac80211/tx.c
3433 +@@ -1886,7 +1886,7 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
3434 + sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
3435 +
3436 + if (invoke_tx_handlers_early(&tx))
3437 +- return false;
3438 ++ return true;
3439 +
3440 + if (ieee80211_queue_skb(local, sdata, tx.sta, tx.skb))
3441 + return true;
3442 +diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
3443 +index 8e67910185a0..1004fb5930de 100644
3444 +--- a/net/netfilter/nf_conntrack_proto_tcp.c
3445 ++++ b/net/netfilter/nf_conntrack_proto_tcp.c
3446 +@@ -1239,8 +1239,8 @@ static const struct nla_policy tcp_nla_policy[CTA_PROTOINFO_TCP_MAX+1] = {
3447 + #define TCP_NLATTR_SIZE ( \
3448 + NLA_ALIGN(NLA_HDRLEN + 1) + \
3449 + NLA_ALIGN(NLA_HDRLEN + 1) + \
3450 +- NLA_ALIGN(NLA_HDRLEN + sizeof(sizeof(struct nf_ct_tcp_flags))) + \
3451 +- NLA_ALIGN(NLA_HDRLEN + sizeof(sizeof(struct nf_ct_tcp_flags))))
3452 ++ NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)) + \
3453 ++ NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)))
3454 +
3455 + static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct)
3456 + {
3457 +diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
3458 +index 9873d734b494..8ad78b82c8e2 100644
3459 +--- a/net/netfilter/nft_set_rbtree.c
3460 ++++ b/net/netfilter/nft_set_rbtree.c
3461 +@@ -355,12 +355,11 @@ cont:
3462 +
3463 + static void nft_rbtree_gc(struct work_struct *work)
3464 + {
3465 ++ struct nft_rbtree_elem *rbe, *rbe_end = NULL, *rbe_prev = NULL;
3466 + struct nft_set_gc_batch *gcb = NULL;
3467 +- struct rb_node *node, *prev = NULL;
3468 +- struct nft_rbtree_elem *rbe;
3469 + struct nft_rbtree *priv;
3470 ++ struct rb_node *node;
3471 + struct nft_set *set;
3472 +- int i;
3473 +
3474 + priv = container_of(work, struct nft_rbtree, gc_work.work);
3475 + set = nft_set_container_of(priv);
3476 +@@ -371,7 +370,7 @@ static void nft_rbtree_gc(struct work_struct *work)
3477 + rbe = rb_entry(node, struct nft_rbtree_elem, node);
3478 +
3479 + if (nft_rbtree_interval_end(rbe)) {
3480 +- prev = node;
3481 ++ rbe_end = rbe;
3482 + continue;
3483 + }
3484 + if (!nft_set_elem_expired(&rbe->ext))
3485 +@@ -379,29 +378,30 @@ static void nft_rbtree_gc(struct work_struct *work)
3486 + if (nft_set_elem_mark_busy(&rbe->ext))
3487 + continue;
3488 +
3489 ++ if (rbe_prev) {
3490 ++ rb_erase(&rbe_prev->node, &priv->root);
3491 ++ rbe_prev = NULL;
3492 ++ }
3493 + gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
3494 + if (!gcb)
3495 + break;
3496 +
3497 + atomic_dec(&set->nelems);
3498 + nft_set_gc_batch_add(gcb, rbe);
3499 ++ rbe_prev = rbe;
3500 +
3501 +- if (prev) {
3502 +- rbe = rb_entry(prev, struct nft_rbtree_elem, node);
3503 ++ if (rbe_end) {
3504 + atomic_dec(&set->nelems);
3505 +- nft_set_gc_batch_add(gcb, rbe);
3506 +- prev = NULL;
3507 ++ nft_set_gc_batch_add(gcb, rbe_end);
3508 ++ rb_erase(&rbe_end->node, &priv->root);
3509 ++ rbe_end = NULL;
3510 + }
3511 + node = rb_next(node);
3512 + if (!node)
3513 + break;
3514 + }
3515 +- if (gcb) {
3516 +- for (i = 0; i < gcb->head.cnt; i++) {
3517 +- rbe = gcb->elems[i];
3518 +- rb_erase(&rbe->node, &priv->root);
3519 +- }
3520 +- }
3521 ++ if (rbe_prev)
3522 ++ rb_erase(&rbe_prev->node, &priv->root);
3523 + write_seqcount_end(&priv->count);
3524 + write_unlock_bh(&priv->lock);
3525 +
3526 +diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
3527 +index 492ab0c36f7c..8b1ba43b1ece 100644
3528 +--- a/net/openvswitch/flow_netlink.c
3529 ++++ b/net/openvswitch/flow_netlink.c
3530 +@@ -2990,7 +2990,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
3531 + * is already present */
3532 + if (mac_proto != MAC_PROTO_NONE)
3533 + return -EINVAL;
3534 +- mac_proto = MAC_PROTO_NONE;
3535 ++ mac_proto = MAC_PROTO_ETHERNET;
3536 + break;
3537 +
3538 + case OVS_ACTION_ATTR_POP_ETH:
3539 +@@ -2998,7 +2998,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
3540 + return -EINVAL;
3541 + if (vlan_tci & htons(VLAN_TAG_PRESENT))
3542 + return -EINVAL;
3543 +- mac_proto = MAC_PROTO_ETHERNET;
3544 ++ mac_proto = MAC_PROTO_NONE;
3545 + break;
3546 +
3547 + case OVS_ACTION_ATTR_PUSH_NSH:
3548 +diff --git a/net/rds/send.c b/net/rds/send.c
3549 +index 59f17a2335f4..0e54ca0f4e9e 100644
3550 +--- a/net/rds/send.c
3551 ++++ b/net/rds/send.c
3552 +@@ -1006,7 +1006,8 @@ static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
3553 + return ret;
3554 + }
3555 +
3556 +-static int rds_send_mprds_hash(struct rds_sock *rs, struct rds_connection *conn)
3557 ++static int rds_send_mprds_hash(struct rds_sock *rs,
3558 ++ struct rds_connection *conn, int nonblock)
3559 + {
3560 + int hash;
3561 +
3562 +@@ -1022,10 +1023,16 @@ static int rds_send_mprds_hash(struct rds_sock *rs, struct rds_connection *conn)
3563 + * used. But if we are interrupted, we have to use the zero
3564 + * c_path in case the connection ends up being non-MP capable.
3565 + */
3566 +- if (conn->c_npaths == 0)
3567 ++ if (conn->c_npaths == 0) {
3568 ++ /* Cannot wait for the connection to be made, so just use
3569 ++ * the base c_path.
3570 ++ */
3571 ++ if (nonblock)
3572 ++ return 0;
3573 + if (wait_event_interruptible(conn->c_hs_waitq,
3574 + conn->c_npaths != 0))
3575 + hash = 0;
3576 ++ }
3577 + if (conn->c_npaths == 1)
3578 + hash = 0;
3579 + }
3580 +@@ -1170,7 +1177,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
3581 + }
3582 +
3583 + if (conn->c_trans->t_mp_capable)
3584 +- cpath = &conn->c_path[rds_send_mprds_hash(rs, conn)];
3585 ++ cpath = &conn->c_path[rds_send_mprds_hash(rs, conn, nonblock)];
3586 + else
3587 + cpath = &conn->c_path[0];
3588 +
3589 +diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
3590 +index 707630ab4713..330372c04940 100644
3591 +--- a/net/rxrpc/ar-internal.h
3592 ++++ b/net/rxrpc/ar-internal.h
3593 +@@ -293,7 +293,6 @@ struct rxrpc_peer {
3594 + struct hlist_node hash_link;
3595 + struct rxrpc_local *local;
3596 + struct hlist_head error_targets; /* targets for net error distribution */
3597 +- struct work_struct error_distributor;
3598 + struct rb_root service_conns; /* Service connections */
3599 + struct list_head keepalive_link; /* Link in net->peer_keepalive[] */
3600 + time64_t last_tx_at; /* Last time packet sent here */
3601 +@@ -304,8 +303,6 @@ struct rxrpc_peer {
3602 + unsigned int maxdata; /* data size (MTU - hdrsize) */
3603 + unsigned short hdrsize; /* header size (IP + UDP + RxRPC) */
3604 + int debug_id; /* debug ID for printks */
3605 +- int error_report; /* Net (+0) or local (+1000000) to distribute */
3606 +-#define RXRPC_LOCAL_ERROR_OFFSET 1000000
3607 + struct sockaddr_rxrpc srx; /* remote address */
3608 +
3609 + /* calculated RTT cache */
3610 +@@ -449,8 +446,7 @@ struct rxrpc_connection {
3611 + spinlock_t state_lock; /* state-change lock */
3612 + enum rxrpc_conn_cache_state cache_state;
3613 + enum rxrpc_conn_proto_state state; /* current state of connection */
3614 +- u32 local_abort; /* local abort code */
3615 +- u32 remote_abort; /* remote abort code */
3616 ++ u32 abort_code; /* Abort code of connection abort */
3617 + int debug_id; /* debug ID for printks */
3618 + atomic_t serial; /* packet serial number counter */
3619 + unsigned int hi_serial; /* highest serial number received */
3620 +@@ -460,8 +456,19 @@ struct rxrpc_connection {
3621 + u8 security_size; /* security header size */
3622 + u8 security_ix; /* security type */
3623 + u8 out_clientflag; /* RXRPC_CLIENT_INITIATED if we are client */
3624 ++ short error; /* Local error code */
3625 + };
3626 +
3627 ++static inline bool rxrpc_to_server(const struct rxrpc_skb_priv *sp)
3628 ++{
3629 ++ return sp->hdr.flags & RXRPC_CLIENT_INITIATED;
3630 ++}
3631 ++
3632 ++static inline bool rxrpc_to_client(const struct rxrpc_skb_priv *sp)
3633 ++{
3634 ++ return !rxrpc_to_server(sp);
3635 ++}
3636 ++
3637 + /*
3638 + * Flags in call->flags.
3639 + */
3640 +@@ -1029,7 +1036,6 @@ void rxrpc_send_keepalive(struct rxrpc_peer *);
3641 + * peer_event.c
3642 + */
3643 + void rxrpc_error_report(struct sock *);
3644 +-void rxrpc_peer_error_distributor(struct work_struct *);
3645 + void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace,
3646 + rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);
3647 + void rxrpc_peer_keepalive_worker(struct work_struct *);
3648 +@@ -1048,7 +1054,6 @@ void rxrpc_destroy_all_peers(struct rxrpc_net *);
3649 + struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *);
3650 + struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *);
3651 + void rxrpc_put_peer(struct rxrpc_peer *);
3652 +-void __rxrpc_queue_peer_error(struct rxrpc_peer *);
3653 +
3654 + /*
3655 + * proc.c
3656 +diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
3657 +index 9d1e298b784c..0e378d73e856 100644
3658 +--- a/net/rxrpc/call_accept.c
3659 ++++ b/net/rxrpc/call_accept.c
3660 +@@ -422,11 +422,11 @@ found_service:
3661 +
3662 + case RXRPC_CONN_REMOTELY_ABORTED:
3663 + rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
3664 +- conn->remote_abort, -ECONNABORTED);
3665 ++ conn->abort_code, conn->error);
3666 + break;
3667 + case RXRPC_CONN_LOCALLY_ABORTED:
3668 + rxrpc_abort_call("CON", call, sp->hdr.seq,
3669 +- conn->local_abort, -ECONNABORTED);
3670 ++ conn->abort_code, conn->error);
3671 + break;
3672 + default:
3673 + BUG();
3674 +diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
3675 +index f6734d8cb01a..ed69257203c2 100644
3676 +--- a/net/rxrpc/call_object.c
3677 ++++ b/net/rxrpc/call_object.c
3678 +@@ -400,7 +400,7 @@ void rxrpc_incoming_call(struct rxrpc_sock *rx,
3679 + rcu_assign_pointer(conn->channels[chan].call, call);
3680 +
3681 + spin_lock(&conn->params.peer->lock);
3682 +- hlist_add_head(&call->error_link, &conn->params.peer->error_targets);
3683 ++ hlist_add_head_rcu(&call->error_link, &conn->params.peer->error_targets);
3684 + spin_unlock(&conn->params.peer->lock);
3685 +
3686 + _net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);
3687 +diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
3688 +index 5736f643c516..0be19132202b 100644
3689 +--- a/net/rxrpc/conn_client.c
3690 ++++ b/net/rxrpc/conn_client.c
3691 +@@ -709,8 +709,8 @@ int rxrpc_connect_call(struct rxrpc_call *call,
3692 + }
3693 +
3694 + spin_lock_bh(&call->conn->params.peer->lock);
3695 +- hlist_add_head(&call->error_link,
3696 +- &call->conn->params.peer->error_targets);
3697 ++ hlist_add_head_rcu(&call->error_link,
3698 ++ &call->conn->params.peer->error_targets);
3699 + spin_unlock_bh(&call->conn->params.peer->lock);
3700 +
3701 + out:
3702 +diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
3703 +index 3fde001fcc39..5e7c8239e703 100644
3704 +--- a/net/rxrpc/conn_event.c
3705 ++++ b/net/rxrpc/conn_event.c
3706 +@@ -126,7 +126,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
3707 +
3708 + switch (chan->last_type) {
3709 + case RXRPC_PACKET_TYPE_ABORT:
3710 +- _proto("Tx ABORT %%%u { %d } [re]", serial, conn->local_abort);
3711 ++ _proto("Tx ABORT %%%u { %d } [re]", serial, conn->abort_code);
3712 + break;
3713 + case RXRPC_PACKET_TYPE_ACK:
3714 + trace_rxrpc_tx_ack(NULL, serial, chan->last_seq, 0,
3715 +@@ -148,13 +148,12 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
3716 + * pass a connection-level abort onto all calls on that connection
3717 + */
3718 + static void rxrpc_abort_calls(struct rxrpc_connection *conn,
3719 +- enum rxrpc_call_completion compl,
3720 +- u32 abort_code, int error)
3721 ++ enum rxrpc_call_completion compl)
3722 + {
3723 + struct rxrpc_call *call;
3724 + int i;
3725 +
3726 +- _enter("{%d},%x", conn->debug_id, abort_code);
3727 ++ _enter("{%d},%x", conn->debug_id, conn->abort_code);
3728 +
3729 + spin_lock(&conn->channel_lock);
3730 +
3731 +@@ -167,9 +166,11 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn,
3732 + trace_rxrpc_abort(call->debug_id,
3733 + "CON", call->cid,
3734 + call->call_id, 0,
3735 +- abort_code, error);
3736 ++ conn->abort_code,
3737 ++ conn->error);
3738 + if (rxrpc_set_call_completion(call, compl,
3739 +- abort_code, error))
3740 ++ conn->abort_code,
3741 ++ conn->error))
3742 + rxrpc_notify_socket(call);
3743 + }
3744 + }
3745 +@@ -202,10 +203,12 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
3746 + return 0;
3747 + }
3748 +
3749 ++ conn->error = error;
3750 ++ conn->abort_code = abort_code;
3751 + conn->state = RXRPC_CONN_LOCALLY_ABORTED;
3752 + spin_unlock_bh(&conn->state_lock);
3753 +
3754 +- rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED, abort_code, error);
3755 ++ rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED);
3756 +
3757 + msg.msg_name = &conn->params.peer->srx.transport;
3758 + msg.msg_namelen = conn->params.peer->srx.transport_len;
3759 +@@ -224,7 +227,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
3760 + whdr._rsvd = 0;
3761 + whdr.serviceId = htons(conn->service_id);
3762 +
3763 +- word = htonl(conn->local_abort);
3764 ++ word = htonl(conn->abort_code);
3765 +
3766 + iov[0].iov_base = &whdr;
3767 + iov[0].iov_len = sizeof(whdr);
3768 +@@ -235,7 +238,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
3769 +
3770 + serial = atomic_inc_return(&conn->serial);
3771 + whdr.serial = htonl(serial);
3772 +- _proto("Tx CONN ABORT %%%u { %d }", serial, conn->local_abort);
3773 ++ _proto("Tx CONN ABORT %%%u { %d }", serial, conn->abort_code);
3774 +
3775 + ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
3776 + if (ret < 0) {
3777 +@@ -308,9 +311,10 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
3778 + abort_code = ntohl(wtmp);
3779 + _proto("Rx ABORT %%%u { ac=%d }", sp->hdr.serial, abort_code);
3780 +
3781 ++ conn->error = -ECONNABORTED;
3782 ++ conn->abort_code = abort_code;
3783 + conn->state = RXRPC_CONN_REMOTELY_ABORTED;
3784 +- rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED,
3785 +- abort_code, -ECONNABORTED);
3786 ++ rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED);
3787 + return -ECONNABORTED;
3788 +
3789 + case RXRPC_PACKET_TYPE_CHALLENGE:
3790 +diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
3791 +index 4c77a78a252a..e0d6d0fb7426 100644
3792 +--- a/net/rxrpc/conn_object.c
3793 ++++ b/net/rxrpc/conn_object.c
3794 +@@ -99,7 +99,7 @@ struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
3795 + k.epoch = sp->hdr.epoch;
3796 + k.cid = sp->hdr.cid & RXRPC_CIDMASK;
3797 +
3798 +- if (sp->hdr.flags & RXRPC_CLIENT_INITIATED) {
3799 ++ if (rxrpc_to_server(sp)) {
3800 + /* We need to look up service connections by the full protocol
3801 + * parameter set. We look up the peer first as an intermediate
3802 + * step and then the connection from the peer's tree.
3803 +@@ -214,7 +214,7 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
3804 + call->peer->cong_cwnd = call->cong_cwnd;
3805 +
3806 + spin_lock_bh(&conn->params.peer->lock);
3807 +- hlist_del_init(&call->error_link);
3808 ++ hlist_del_rcu(&call->error_link);
3809 + spin_unlock_bh(&conn->params.peer->lock);
3810 +
3811 + if (rxrpc_is_client_call(call))
3812 +diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
3813 +index 608d078a4981..a81240845224 100644
3814 +--- a/net/rxrpc/input.c
3815 ++++ b/net/rxrpc/input.c
3816 +@@ -216,10 +216,11 @@ static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb,
3817 + /*
3818 + * Apply a hard ACK by advancing the Tx window.
3819 + */
3820 +-static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
3821 ++static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
3822 + struct rxrpc_ack_summary *summary)
3823 + {
3824 + struct sk_buff *skb, *list = NULL;
3825 ++ bool rot_last = false;
3826 + int ix;
3827 + u8 annotation;
3828 +
3829 +@@ -243,15 +244,17 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
3830 + skb->next = list;
3831 + list = skb;
3832 +
3833 +- if (annotation & RXRPC_TX_ANNO_LAST)
3834 ++ if (annotation & RXRPC_TX_ANNO_LAST) {
3835 + set_bit(RXRPC_CALL_TX_LAST, &call->flags);
3836 ++ rot_last = true;
3837 ++ }
3838 + if ((annotation & RXRPC_TX_ANNO_MASK) != RXRPC_TX_ANNO_ACK)
3839 + summary->nr_rot_new_acks++;
3840 + }
3841 +
3842 + spin_unlock(&call->lock);
3843 +
3844 +- trace_rxrpc_transmit(call, (test_bit(RXRPC_CALL_TX_LAST, &call->flags) ?
3845 ++ trace_rxrpc_transmit(call, (rot_last ?
3846 + rxrpc_transmit_rotate_last :
3847 + rxrpc_transmit_rotate));
3848 + wake_up(&call->waitq);
3849 +@@ -262,6 +265,8 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
3850 + skb->next = NULL;
3851 + rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
3852 + }
3853 ++
3854 ++ return rot_last;
3855 + }
3856 +
3857 + /*
3858 +@@ -273,23 +278,26 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
3859 + static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
3860 + const char *abort_why)
3861 + {
3862 ++ unsigned int state;
3863 +
3864 + ASSERT(test_bit(RXRPC_CALL_TX_LAST, &call->flags));
3865 +
3866 + write_lock(&call->state_lock);
3867 +
3868 +- switch (call->state) {
3869 ++ state = call->state;
3870 ++ switch (state) {
3871 + case RXRPC_CALL_CLIENT_SEND_REQUEST:
3872 + case RXRPC_CALL_CLIENT_AWAIT_REPLY:
3873 + if (reply_begun)
3874 +- call->state = RXRPC_CALL_CLIENT_RECV_REPLY;
3875 ++ call->state = state = RXRPC_CALL_CLIENT_RECV_REPLY;
3876 + else
3877 +- call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
3878 ++ call->state = state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
3879 + break;
3880 +
3881 + case RXRPC_CALL_SERVER_AWAIT_ACK:
3882 + __rxrpc_call_completed(call);
3883 + rxrpc_notify_socket(call);
3884 ++ state = call->state;
3885 + break;
3886 +
3887 + default:
3888 +@@ -297,11 +305,10 @@ static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
3889 + }
3890 +
3891 + write_unlock(&call->state_lock);
3892 +- if (call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY) {
3893 ++ if (state == RXRPC_CALL_CLIENT_AWAIT_REPLY)
3894 + trace_rxrpc_transmit(call, rxrpc_transmit_await_reply);
3895 +- } else {
3896 ++ else
3897 + trace_rxrpc_transmit(call, rxrpc_transmit_end);
3898 +- }
3899 + _leave(" = ok");
3900 + return true;
3901 +
3902 +@@ -332,11 +339,11 @@ static bool rxrpc_receiving_reply(struct rxrpc_call *call)
3903 + trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now);
3904 + }
3905 +
3906 +- if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags))
3907 +- rxrpc_rotate_tx_window(call, top, &summary);
3908 + if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
3909 +- rxrpc_proto_abort("TXL", call, top);
3910 +- return false;
3911 ++ if (!rxrpc_rotate_tx_window(call, top, &summary)) {
3912 ++ rxrpc_proto_abort("TXL", call, top);
3913 ++ return false;
3914 ++ }
3915 + }
3916 + if (!rxrpc_end_tx_phase(call, true, "ETD"))
3917 + return false;
3918 +@@ -616,13 +623,14 @@ static void rxrpc_input_requested_ack(struct rxrpc_call *call,
3919 + if (!skb)
3920 + continue;
3921 +
3922 ++ sent_at = skb->tstamp;
3923 ++ smp_rmb(); /* Read timestamp before serial. */
3924 + sp = rxrpc_skb(skb);
3925 + if (sp->hdr.serial != orig_serial)
3926 + continue;
3927 +- smp_rmb();
3928 +- sent_at = skb->tstamp;
3929 + goto found;
3930 + }
3931 ++
3932 + return;
3933 +
3934 + found:
3935 +@@ -854,6 +862,16 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
3936 + rxrpc_propose_ack_respond_to_ack);
3937 + }
3938 +
3939 ++ /* Discard any out-of-order or duplicate ACKs. */
3940 ++ if (before_eq(sp->hdr.serial, call->acks_latest)) {
3941 ++ _debug("discard ACK %d <= %d",
3942 ++ sp->hdr.serial, call->acks_latest);
3943 ++ return;
3944 ++ }
3945 ++ call->acks_latest_ts = skb->tstamp;
3946 ++ call->acks_latest = sp->hdr.serial;
3947 ++
3948 ++ /* Parse rwind and mtu sizes if provided. */
3949 + ioffset = offset + nr_acks + 3;
3950 + if (skb->len >= ioffset + sizeof(buf.info)) {
3951 + if (skb_copy_bits(skb, ioffset, &buf.info, sizeof(buf.info)) < 0)
3952 +@@ -875,23 +893,18 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
3953 + return;
3954 + }
3955 +
3956 +- /* Discard any out-of-order or duplicate ACKs. */
3957 +- if (before_eq(sp->hdr.serial, call->acks_latest)) {
3958 +- _debug("discard ACK %d <= %d",
3959 +- sp->hdr.serial, call->acks_latest);
3960 +- return;
3961 +- }
3962 +- call->acks_latest_ts = skb->tstamp;
3963 +- call->acks_latest = sp->hdr.serial;
3964 +-
3965 + if (before(hard_ack, call->tx_hard_ack) ||
3966 + after(hard_ack, call->tx_top))
3967 + return rxrpc_proto_abort("AKW", call, 0);
3968 + if (nr_acks > call->tx_top - hard_ack)
3969 + return rxrpc_proto_abort("AKN", call, 0);
3970 +
3971 +- if (after(hard_ack, call->tx_hard_ack))
3972 +- rxrpc_rotate_tx_window(call, hard_ack, &summary);
3973 ++ if (after(hard_ack, call->tx_hard_ack)) {
3974 ++ if (rxrpc_rotate_tx_window(call, hard_ack, &summary)) {
3975 ++ rxrpc_end_tx_phase(call, false, "ETA");
3976 ++ return;
3977 ++ }
3978 ++ }
3979 +
3980 + if (nr_acks > 0) {
3981 + if (skb_copy_bits(skb, offset, buf.acks, nr_acks) < 0)
3982 +@@ -900,11 +913,6 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
3983 + &summary);
3984 + }
3985 +
3986 +- if (test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
3987 +- rxrpc_end_tx_phase(call, false, "ETA");
3988 +- return;
3989 +- }
3990 +-
3991 + if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] &
3992 + RXRPC_TX_ANNO_LAST &&
3993 + summary.nr_acks == call->tx_top - hard_ack &&
3994 +@@ -926,8 +934,7 @@ static void rxrpc_input_ackall(struct rxrpc_call *call, struct sk_buff *skb)
3995 +
3996 + _proto("Rx ACKALL %%%u", sp->hdr.serial);
3997 +
3998 +- rxrpc_rotate_tx_window(call, call->tx_top, &summary);
3999 +- if (test_bit(RXRPC_CALL_TX_LAST, &call->flags))
4000 ++ if (rxrpc_rotate_tx_window(call, call->tx_top, &summary))
4001 + rxrpc_end_tx_phase(call, false, "ETL");
4002 + }
4003 +
4004 +@@ -1137,6 +1144,9 @@ void rxrpc_data_ready(struct sock *udp_sk)
4005 + return;
4006 + }
4007 +
4008 ++ if (skb->tstamp == 0)
4009 ++ skb->tstamp = ktime_get_real();
4010 ++
4011 + rxrpc_new_skb(skb, rxrpc_skb_rx_received);
4012 +
4013 + _net("recv skb %p", skb);
4014 +@@ -1171,10 +1181,6 @@ void rxrpc_data_ready(struct sock *udp_sk)
4015 +
4016 + trace_rxrpc_rx_packet(sp);
4017 +
4018 +- _net("Rx RxRPC %s ep=%x call=%x:%x",
4019 +- sp->hdr.flags & RXRPC_CLIENT_INITIATED ? "ToServer" : "ToClient",
4020 +- sp->hdr.epoch, sp->hdr.cid, sp->hdr.callNumber);
4021 +-
4022 + if (sp->hdr.type >= RXRPC_N_PACKET_TYPES ||
4023 + !((RXRPC_SUPPORTED_PACKET_TYPES >> sp->hdr.type) & 1)) {
4024 + _proto("Rx Bad Packet Type %u", sp->hdr.type);
4025 +@@ -1183,13 +1189,13 @@ void rxrpc_data_ready(struct sock *udp_sk)
4026 +
4027 + switch (sp->hdr.type) {
4028 + case RXRPC_PACKET_TYPE_VERSION:
4029 +- if (!(sp->hdr.flags & RXRPC_CLIENT_INITIATED))
4030 ++ if (rxrpc_to_client(sp))
4031 + goto discard;
4032 + rxrpc_post_packet_to_local(local, skb);
4033 + goto out;
4034 +
4035 + case RXRPC_PACKET_TYPE_BUSY:
4036 +- if (sp->hdr.flags & RXRPC_CLIENT_INITIATED)
4037 ++ if (rxrpc_to_server(sp))
4038 + goto discard;
4039 + /* Fall through */
4040 +
4041 +@@ -1269,7 +1275,7 @@ void rxrpc_data_ready(struct sock *udp_sk)
4042 + call = rcu_dereference(chan->call);
4043 +
4044 + if (sp->hdr.callNumber > chan->call_id) {
4045 +- if (!(sp->hdr.flags & RXRPC_CLIENT_INITIATED)) {
4046 ++ if (rxrpc_to_client(sp)) {
4047 + rcu_read_unlock();
4048 + goto reject_packet;
4049 + }
4050 +@@ -1292,7 +1298,7 @@ void rxrpc_data_ready(struct sock *udp_sk)
4051 + }
4052 +
4053 + if (!call || atomic_read(&call->usage) == 0) {
4054 +- if (!(sp->hdr.type & RXRPC_CLIENT_INITIATED) ||
4055 ++ if (rxrpc_to_client(sp) ||
4056 + sp->hdr.callNumber == 0 ||
4057 + sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
4058 + goto bad_message_unlock;
4059 +diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
4060 +index b493e6b62740..386dc1f20c73 100644
4061 +--- a/net/rxrpc/local_object.c
4062 ++++ b/net/rxrpc/local_object.c
4063 +@@ -135,10 +135,10 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
4064 + }
4065 +
4066 + switch (local->srx.transport.family) {
4067 +- case AF_INET:
4068 +- /* we want to receive ICMP errors */
4069 ++ case AF_INET6:
4070 ++ /* we want to receive ICMPv6 errors */
4071 + opt = 1;
4072 +- ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
4073 ++ ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_RECVERR,
4074 + (char *) &opt, sizeof(opt));
4075 + if (ret < 0) {
4076 + _debug("setsockopt failed");
4077 +@@ -146,19 +146,22 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
4078 + }
4079 +
4080 + /* we want to set the don't fragment bit */
4081 +- opt = IP_PMTUDISC_DO;
4082 +- ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
4083 ++ opt = IPV6_PMTUDISC_DO;
4084 ++ ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_MTU_DISCOVER,
4085 + (char *) &opt, sizeof(opt));
4086 + if (ret < 0) {
4087 + _debug("setsockopt failed");
4088 + goto error;
4089 + }
4090 +- break;
4091 +
4092 +- case AF_INET6:
4093 ++ /* Fall through and set IPv4 options too otherwise we don't get
4094 ++ * errors from IPv4 packets sent through the IPv6 socket.
4095 ++ */
4096 ++
4097 ++ case AF_INET:
4098 + /* we want to receive ICMP errors */
4099 + opt = 1;
4100 +- ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_RECVERR,
4101 ++ ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
4102 + (char *) &opt, sizeof(opt));
4103 + if (ret < 0) {
4104 + _debug("setsockopt failed");
4105 +@@ -166,13 +169,22 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
4106 + }
4107 +
4108 + /* we want to set the don't fragment bit */
4109 +- opt = IPV6_PMTUDISC_DO;
4110 +- ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_MTU_DISCOVER,
4111 ++ opt = IP_PMTUDISC_DO;
4112 ++ ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
4113 + (char *) &opt, sizeof(opt));
4114 + if (ret < 0) {
4115 + _debug("setsockopt failed");
4116 + goto error;
4117 + }
4118 ++
4119 ++ /* We want receive timestamps. */
4120 ++ opt = 1;
4121 ++ ret = kernel_setsockopt(local->socket, SOL_SOCKET, SO_TIMESTAMPNS,
4122 ++ (char *)&opt, sizeof(opt));
4123 ++ if (ret < 0) {
4124 ++ _debug("setsockopt failed");
4125 ++ goto error;
4126 ++ }
4127 + break;
4128 +
4129 + default:
4130 +diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
4131 +index 4774c8f5634d..6ac21bb2071d 100644
4132 +--- a/net/rxrpc/output.c
4133 ++++ b/net/rxrpc/output.c
4134 +@@ -124,7 +124,6 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
4135 + struct kvec iov[2];
4136 + rxrpc_serial_t serial;
4137 + rxrpc_seq_t hard_ack, top;
4138 +- ktime_t now;
4139 + size_t len, n;
4140 + int ret;
4141 + u8 reason;
4142 +@@ -196,9 +195,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
4143 + /* We need to stick a time in before we send the packet in case
4144 + * the reply gets back before kernel_sendmsg() completes - but
4145 + * asking UDP to send the packet can take a relatively long
4146 +- * time, so we update the time after, on the assumption that
4147 +- * the packet transmission is more likely to happen towards the
4148 +- * end of the kernel_sendmsg() call.
4149 ++ * time.
4150 + */
4151 + call->ping_time = ktime_get_real();
4152 + set_bit(RXRPC_CALL_PINGING, &call->flags);
4153 +@@ -206,9 +203,6 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
4154 + }
4155 +
4156 + ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
4157 +- now = ktime_get_real();
4158 +- if (ping)
4159 +- call->ping_time = now;
4160 + conn->params.peer->last_tx_at = ktime_get_seconds();
4161 + if (ret < 0)
4162 + trace_rxrpc_tx_fail(call->debug_id, serial, ret,
4163 +@@ -357,8 +351,14 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
4164 +
4165 + /* If our RTT cache needs working on, request an ACK. Also request
4166 + * ACKs if a DATA packet appears to have been lost.
4167 ++ *
4168 ++ * However, we mustn't request an ACK on the last reply packet of a
4169 ++ * service call, lest OpenAFS incorrectly send us an ACK with some
4170 ++ * soft-ACKs in it and then never follow up with a proper hard ACK.
4171 + */
4172 +- if (!(sp->hdr.flags & RXRPC_LAST_PACKET) &&
4173 ++ if ((!(sp->hdr.flags & RXRPC_LAST_PACKET) ||
4174 ++ rxrpc_to_server(sp)
4175 ++ ) &&
4176 + (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events) ||
4177 + retrans ||
4178 + call->cong_mode == RXRPC_CALL_SLOW_START ||
4179 +@@ -384,6 +384,11 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
4180 + goto send_fragmentable;
4181 +
4182 + down_read(&conn->params.local->defrag_sem);
4183 ++
4184 ++ sp->hdr.serial = serial;
4185 ++ smp_wmb(); /* Set serial before timestamp */
4186 ++ skb->tstamp = ktime_get_real();
4187 ++
4188 + /* send the packet by UDP
4189 + * - returns -EMSGSIZE if UDP would have to fragment the packet
4190 + * to go out of the interface
4191 +@@ -404,12 +409,8 @@ done:
4192 + trace_rxrpc_tx_data(call, sp->hdr.seq, serial, whdr.flags,
4193 + retrans, lost);
4194 + if (ret >= 0) {
4195 +- ktime_t now = ktime_get_real();
4196 +- skb->tstamp = now;
4197 +- smp_wmb();
4198 +- sp->hdr.serial = serial;
4199 + if (whdr.flags & RXRPC_REQUEST_ACK) {
4200 +- call->peer->rtt_last_req = now;
4201 ++ call->peer->rtt_last_req = skb->tstamp;
4202 + trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial);
4203 + if (call->peer->rtt_usage > 1) {
4204 + unsigned long nowj = jiffies, ack_lost_at;
4205 +@@ -448,6 +449,10 @@ send_fragmentable:
4206 +
4207 + down_write(&conn->params.local->defrag_sem);
4208 +
4209 ++ sp->hdr.serial = serial;
4210 ++ smp_wmb(); /* Set serial before timestamp */
4211 ++ skb->tstamp = ktime_get_real();
4212 ++
4213 + switch (conn->params.local->srx.transport.family) {
4214 + case AF_INET:
4215 + opt = IP_PMTUDISC_DONT;
4216 +diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
4217 +index 4f9da2f51c69..f3e6fc670da2 100644
4218 +--- a/net/rxrpc/peer_event.c
4219 ++++ b/net/rxrpc/peer_event.c
4220 +@@ -23,6 +23,8 @@
4221 + #include "ar-internal.h"
4222 +
4223 + static void rxrpc_store_error(struct rxrpc_peer *, struct sock_exterr_skb *);
4224 ++static void rxrpc_distribute_error(struct rxrpc_peer *, int,
4225 ++ enum rxrpc_call_completion);
4226 +
4227 + /*
4228 + * Find the peer associated with an ICMP packet.
4229 +@@ -194,8 +196,6 @@ void rxrpc_error_report(struct sock *sk)
4230 + rcu_read_unlock();
4231 + rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
4232 +
4233 +- /* The ref we obtained is passed off to the work item */
4234 +- __rxrpc_queue_peer_error(peer);
4235 + _leave("");
4236 + }
4237 +
4238 +@@ -205,6 +205,7 @@ void rxrpc_error_report(struct sock *sk)
4239 + static void rxrpc_store_error(struct rxrpc_peer *peer,
4240 + struct sock_exterr_skb *serr)
4241 + {
4242 ++ enum rxrpc_call_completion compl = RXRPC_CALL_NETWORK_ERROR;
4243 + struct sock_extended_err *ee;
4244 + int err;
4245 +
4246 +@@ -255,7 +256,7 @@ static void rxrpc_store_error(struct rxrpc_peer *peer,
4247 + case SO_EE_ORIGIN_NONE:
4248 + case SO_EE_ORIGIN_LOCAL:
4249 + _proto("Rx Received local error { error=%d }", err);
4250 +- err += RXRPC_LOCAL_ERROR_OFFSET;
4251 ++ compl = RXRPC_CALL_LOCAL_ERROR;
4252 + break;
4253 +
4254 + case SO_EE_ORIGIN_ICMP6:
4255 +@@ -264,48 +265,23 @@ static void rxrpc_store_error(struct rxrpc_peer *peer,
4256 + break;
4257 + }
4258 +
4259 +- peer->error_report = err;
4260 ++ rxrpc_distribute_error(peer, err, compl);
4261 + }
4262 +
4263 + /*
4264 +- * Distribute an error that occurred on a peer
4265 ++ * Distribute an error that occurred on a peer.
4266 + */
4267 +-void rxrpc_peer_error_distributor(struct work_struct *work)
4268 ++static void rxrpc_distribute_error(struct rxrpc_peer *peer, int error,
4269 ++ enum rxrpc_call_completion compl)
4270 + {
4271 +- struct rxrpc_peer *peer =
4272 +- container_of(work, struct rxrpc_peer, error_distributor);
4273 + struct rxrpc_call *call;
4274 +- enum rxrpc_call_completion compl;
4275 +- int error;
4276 +-
4277 +- _enter("");
4278 +-
4279 +- error = READ_ONCE(peer->error_report);
4280 +- if (error < RXRPC_LOCAL_ERROR_OFFSET) {
4281 +- compl = RXRPC_CALL_NETWORK_ERROR;
4282 +- } else {
4283 +- compl = RXRPC_CALL_LOCAL_ERROR;
4284 +- error -= RXRPC_LOCAL_ERROR_OFFSET;
4285 +- }
4286 +
4287 +- _debug("ISSUE ERROR %s %d", rxrpc_call_completions[compl], error);
4288 +-
4289 +- spin_lock_bh(&peer->lock);
4290 +-
4291 +- while (!hlist_empty(&peer->error_targets)) {
4292 +- call = hlist_entry(peer->error_targets.first,
4293 +- struct rxrpc_call, error_link);
4294 +- hlist_del_init(&call->error_link);
4295 ++ hlist_for_each_entry_rcu(call, &peer->error_targets, error_link) {
4296 + rxrpc_see_call(call);
4297 +-
4298 +- if (rxrpc_set_call_completion(call, compl, 0, -error))
4299 ++ if (call->state < RXRPC_CALL_COMPLETE &&
4300 ++ rxrpc_set_call_completion(call, compl, 0, -error))
4301 + rxrpc_notify_socket(call);
4302 + }
4303 +-
4304 +- spin_unlock_bh(&peer->lock);
4305 +-
4306 +- rxrpc_put_peer(peer);
4307 +- _leave("");
4308 + }
4309 +
4310 + /*
4311 +diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
4312 +index 24ec7cdcf332..ef4c2e8a35cc 100644
4313 +--- a/net/rxrpc/peer_object.c
4314 ++++ b/net/rxrpc/peer_object.c
4315 +@@ -222,8 +222,6 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp)
4316 + atomic_set(&peer->usage, 1);
4317 + peer->local = local;
4318 + INIT_HLIST_HEAD(&peer->error_targets);
4319 +- INIT_WORK(&peer->error_distributor,
4320 +- &rxrpc_peer_error_distributor);
4321 + peer->service_conns = RB_ROOT;
4322 + seqlock_init(&peer->service_conn_lock);
4323 + spin_lock_init(&peer->lock);
4324 +@@ -415,21 +413,6 @@ struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer)
4325 + return peer;
4326 + }
4327 +
4328 +-/*
4329 +- * Queue a peer record. This passes the caller's ref to the workqueue.
4330 +- */
4331 +-void __rxrpc_queue_peer_error(struct rxrpc_peer *peer)
4332 +-{
4333 +- const void *here = __builtin_return_address(0);
4334 +- int n;
4335 +-
4336 +- n = atomic_read(&peer->usage);
4337 +- if (rxrpc_queue_work(&peer->error_distributor))
4338 +- trace_rxrpc_peer(peer, rxrpc_peer_queued_error, n, here);
4339 +- else
4340 +- rxrpc_put_peer(peer);
4341 +-}
4342 +-
4343 + /*
4344 + * Discard a peer record.
4345 + */
4346 +diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
4347 +index f74513a7c7a8..c855fd045a3c 100644
4348 +--- a/net/sched/cls_api.c
4349 ++++ b/net/sched/cls_api.c
4350 +@@ -31,6 +31,8 @@
4351 + #include <net/pkt_sched.h>
4352 + #include <net/pkt_cls.h>
4353 +
4354 ++extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
4355 ++
4356 + /* The list of all installed classifier types */
4357 + static LIST_HEAD(tcf_proto_base);
4358 +
4359 +@@ -1083,7 +1085,7 @@ static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
4360 + replay:
4361 + tp_created = 0;
4362 +
4363 +- err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack);
4364 ++ err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
4365 + if (err < 0)
4366 + return err;
4367 +
4368 +@@ -1226,7 +1228,7 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
4369 + if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
4370 + return -EPERM;
4371 +
4372 +- err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack);
4373 ++ err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
4374 + if (err < 0)
4375 + return err;
4376 +
4377 +@@ -1334,7 +1336,7 @@ static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
4378 + void *fh = NULL;
4379 + int err;
4380 +
4381 +- err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack);
4382 ++ err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
4383 + if (err < 0)
4384 + return err;
4385 +
4386 +@@ -1488,7 +1490,8 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
4387 + if (nlmsg_len(cb->nlh) < sizeof(*tcm))
4388 + return skb->len;
4389 +
4390 +- err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, NULL, NULL);
4391 ++ err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
4392 ++ NULL);
4393 + if (err)
4394 + return err;
4395 +
4396 +diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
4397 +index 99cc25aae503..57f71765febe 100644
4398 +--- a/net/sched/sch_api.c
4399 ++++ b/net/sched/sch_api.c
4400 +@@ -2052,7 +2052,8 @@ static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
4401 +
4402 + if (tcm->tcm_parent) {
4403 + q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent));
4404 +- if (q && tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
4405 ++ if (q && q != root &&
4406 ++ tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
4407 + return -1;
4408 + return 0;
4409 + }
4410 +diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
4411 +index cbe4831f46f4..4a042abf844c 100644
4412 +--- a/net/sched/sch_gred.c
4413 ++++ b/net/sched/sch_gred.c
4414 +@@ -413,7 +413,7 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt,
4415 + if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL) {
4416 + if (tb[TCA_GRED_LIMIT] != NULL)
4417 + sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
4418 +- return gred_change_table_def(sch, opt);
4419 ++ return gred_change_table_def(sch, tb[TCA_GRED_DPS]);
4420 + }
4421 +
4422 + if (tb[TCA_GRED_PARMS] == NULL ||
4423 +diff --git a/net/sctp/socket.c b/net/sctp/socket.c
4424 +index 50ee07cd20c4..9d903b870790 100644
4425 +--- a/net/sctp/socket.c
4426 ++++ b/net/sctp/socket.c
4427 +@@ -270,11 +270,10 @@ struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id)
4428 +
4429 + spin_lock_bh(&sctp_assocs_id_lock);
4430 + asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id);
4431 ++ if (asoc && (asoc->base.sk != sk || asoc->base.dead))
4432 ++ asoc = NULL;
4433 + spin_unlock_bh(&sctp_assocs_id_lock);
4434 +
4435 +- if (!asoc || (asoc->base.sk != sk) || asoc->base.dead)
4436 +- return NULL;
4437 +-
4438 + return asoc;
4439 + }
4440 +
4441 +@@ -1940,8 +1939,10 @@ static int sctp_sendmsg_to_asoc(struct sctp_association *asoc,
4442 + if (sp->strm_interleave) {
4443 + timeo = sock_sndtimeo(sk, 0);
4444 + err = sctp_wait_for_connect(asoc, &timeo);
4445 +- if (err)
4446 ++ if (err) {
4447 ++ err = -ESRCH;
4448 + goto err;
4449 ++ }
4450 + } else {
4451 + wait_connect = true;
4452 + }
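
Note: two sctp fixes here. sctp_id2assoc() now performs the ownership and liveness checks while still holding sctp_assocs_id_lock, closing a race where an association being torn down on another CPU could be returned and used after free; and sctp_sendmsg_to_asoc() converts a failed connect wait into -ESRCH instead of propagating the raw wait status. The locking change follows a common pattern, sketched with hypothetical names:

    /* Hypothetical sketch: reject the object inside the same critical
     * section that looked it up, so teardown (which also takes the
     * lock) cannot slip in between lookup and validation.
     */
    spin_lock_bh(&table_lock);
    obj = idr_find(&table, id);
    if (obj && (obj->owner != sk || obj->dead))
            obj = NULL;
    spin_unlock_bh(&table_lock);
    return obj;
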
4453 +diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
4454 +index add82b0266f3..3be95f77ec7f 100644
4455 +--- a/net/smc/smc_core.c
4456 ++++ b/net/smc/smc_core.c
4457 +@@ -114,22 +114,17 @@ static void __smc_lgr_unregister_conn(struct smc_connection *conn)
4458 + sock_put(&smc->sk); /* sock_hold in smc_lgr_register_conn() */
4459 + }
4460 +
4461 +-/* Unregister connection and trigger lgr freeing if applicable
4462 ++/* Unregister connection from lgr
4463 + */
4464 + static void smc_lgr_unregister_conn(struct smc_connection *conn)
4465 + {
4466 + struct smc_link_group *lgr = conn->lgr;
4467 +- int reduced = 0;
4468 +
4469 + write_lock_bh(&lgr->conns_lock);
4470 + if (conn->alert_token_local) {
4471 +- reduced = 1;
4472 + __smc_lgr_unregister_conn(conn);
4473 + }
4474 + write_unlock_bh(&lgr->conns_lock);
4475 +- if (!reduced || lgr->conns_num)
4476 +- return;
4477 +- smc_lgr_schedule_free_work(lgr);
4478 + }
4479 +
4480 + static void smc_lgr_free_work(struct work_struct *work)
4481 +@@ -238,7 +233,8 @@ out:
4482 + return rc;
4483 + }
4484 +
4485 +-static void smc_buf_unuse(struct smc_connection *conn)
4486 ++static void smc_buf_unuse(struct smc_connection *conn,
4487 ++ struct smc_link_group *lgr)
4488 + {
4489 + if (conn->sndbuf_desc)
4490 + conn->sndbuf_desc->used = 0;
4491 +@@ -248,8 +244,6 @@ static void smc_buf_unuse(struct smc_connection *conn)
4492 + conn->rmb_desc->used = 0;
4493 + } else {
4494 + /* buf registration failed, reuse not possible */
4495 +- struct smc_link_group *lgr = conn->lgr;
4496 +-
4497 + write_lock_bh(&lgr->rmbs_lock);
4498 + list_del(&conn->rmb_desc->list);
4499 + write_unlock_bh(&lgr->rmbs_lock);
4500 +@@ -262,11 +256,16 @@ static void smc_buf_unuse(struct smc_connection *conn)
4501 + /* remove a finished connection from its link group */
4502 + void smc_conn_free(struct smc_connection *conn)
4503 + {
4504 +- if (!conn->lgr)
4505 ++ struct smc_link_group *lgr = conn->lgr;
4506 ++
4507 ++ if (!lgr)
4508 + return;
4509 + smc_cdc_tx_dismiss_slots(conn);
4510 +- smc_lgr_unregister_conn(conn);
4511 +- smc_buf_unuse(conn);
4512 ++ smc_lgr_unregister_conn(conn); /* unsets conn->lgr */
4513 ++ smc_buf_unuse(conn, lgr); /* allow buffer reuse */
4514 ++
4515 ++ if (!lgr->conns_num)
4516 ++ smc_lgr_schedule_free_work(lgr);
4517 + }
4518 +
4519 + static void smc_link_clear(struct smc_link *lnk)
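
Note: the smc_core.c hunks reorder connection teardown. smc_lgr_unregister_conn() ends up clearing conn->lgr (per the added comment), so smc_conn_free() captures the link-group pointer up front and hands it to smc_buf_unuse() explicitly; scheduling the group's delayed free moves out of the unregister helper to this one call site, which fires it only once the last connection is gone. In outline:

    /* Save the back-pointer before the helper invalidates it, then
     * run the remaining cleanup on the saved copy.
     */
    struct smc_link_group *lgr = conn->lgr;

    smc_lgr_unregister_conn(conn);          /* unsets conn->lgr */
    smc_buf_unuse(conn, lgr);               /* still needs the group */
    if (!lgr->conns_num)
            smc_lgr_schedule_free_work(lgr);
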
4520 +diff --git a/net/socket.c b/net/socket.c
4521 +index d4187ac17d55..fcb18a7ed14b 100644
4522 +--- a/net/socket.c
4523 ++++ b/net/socket.c
4524 +@@ -2887,9 +2887,14 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
4525 + copy_in_user(&rxnfc->fs.ring_cookie,
4526 + &compat_rxnfc->fs.ring_cookie,
4527 + (void __user *)(&rxnfc->fs.location + 1) -
4528 +- (void __user *)&rxnfc->fs.ring_cookie) ||
4529 +- copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
4530 +- sizeof(rxnfc->rule_cnt)))
4531 ++ (void __user *)&rxnfc->fs.ring_cookie))
4532 ++ return -EFAULT;
4533 ++ if (ethcmd == ETHTOOL_GRXCLSRLALL) {
4534 ++ if (put_user(rule_cnt, &rxnfc->rule_cnt))
4535 ++ return -EFAULT;
4536 ++ } else if (copy_in_user(&rxnfc->rule_cnt,
4537 ++ &compat_rxnfc->rule_cnt,
4538 ++ sizeof(rxnfc->rule_cnt)))
4539 + return -EFAULT;
4540 + }
4541 +
4542 +diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
4543 +index 51b4b96f89db..3cfeb9df64b0 100644
4544 +--- a/net/tipc/name_distr.c
4545 ++++ b/net/tipc/name_distr.c
4546 +@@ -115,7 +115,7 @@ struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ)
4547 + struct sk_buff *buf;
4548 + struct distr_item *item;
4549 +
4550 +- list_del(&publ->binding_node);
4551 ++ list_del_rcu(&publ->binding_node);
4552 +
4553 + if (publ->scope == TIPC_NODE_SCOPE)
4554 + return NULL;
4555 +@@ -147,7 +147,7 @@ static void named_distribute(struct net *net, struct sk_buff_head *list,
4556 + ITEM_SIZE) * ITEM_SIZE;
4557 + u32 msg_rem = msg_dsz;
4558 +
4559 +- list_for_each_entry(publ, pls, binding_node) {
4560 ++ list_for_each_entry_rcu(publ, pls, binding_node) {
4561 + /* Prepare next buffer: */
4562 + if (!skb) {
4563 + skb = named_prepare_buf(net, PUBLICATION, msg_rem,
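
Note: the tipc hunks bring the publication lists under RCU discipline: withdrawals unlink with list_del_rcu() and the distributor walks with list_for_each_entry_rcu(), so lockless readers never chase a poisoned next pointer. The general pairing, as a self-contained sketch with hypothetical names:

    struct item {
            struct list_head node;
            struct rcu_head rcu;
    };

    /* writer, under the update-side lock */
    list_del_rcu(&it->node);        /* readers may still traverse it */
    kfree_rcu(it, rcu);             /* freed only after a grace period */

    /* reader */
    rcu_read_lock();
    list_for_each_entry_rcu(it, &head, node)
            process(it);
    rcu_read_unlock();
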
4564 +diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
4565 +index 9fab8e5a4a5b..994ddc7ec9b1 100644
4566 +--- a/net/tls/tls_sw.c
4567 ++++ b/net/tls/tls_sw.c
4568 +@@ -286,7 +286,7 @@ static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
4569 + int length, int *pages_used,
4570 + unsigned int *size_used,
4571 + struct scatterlist *to, int to_max_pages,
4572 +- bool charge, bool revert)
4573 ++ bool charge)
4574 + {
4575 + struct page *pages[MAX_SKB_FRAGS];
4576 +
4577 +@@ -335,10 +335,10 @@ static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
4578 + }
4579 +
4580 + out:
4581 ++ if (rc)
4582 ++ iov_iter_revert(from, size - *size_used);
4583 + *size_used = size;
4584 + *pages_used = num_elem;
4585 +- if (revert)
4586 +- iov_iter_revert(from, size);
4587 +
4588 + return rc;
4589 + }
4590 +@@ -440,7 +440,7 @@ alloc_encrypted:
4591 + &ctx->sg_plaintext_size,
4592 + ctx->sg_plaintext_data,
4593 + ARRAY_SIZE(ctx->sg_plaintext_data),
4594 +- true, false);
4595 ++ true);
4596 + if (ret)
4597 + goto fallback_to_reg_send;
4598 +
4599 +@@ -453,8 +453,6 @@ alloc_encrypted:
4600 +
4601 + copied -= try_to_copy;
4602 + fallback_to_reg_send:
4603 +- iov_iter_revert(&msg->msg_iter,
4604 +- ctx->sg_plaintext_size - orig_size);
4605 + trim_sg(sk, ctx->sg_plaintext_data,
4606 + &ctx->sg_plaintext_num_elem,
4607 + &ctx->sg_plaintext_size,
4608 +@@ -828,7 +826,7 @@ int tls_sw_recvmsg(struct sock *sk,
4609 + err = zerocopy_from_iter(sk, &msg->msg_iter,
4610 + to_copy, &pages,
4611 + &chunk, &sgin[1],
4612 +- MAX_SKB_FRAGS, false, true);
4613 ++ MAX_SKB_FRAGS, false);
4614 + if (err < 0)
4615 + goto fallback_to_reg_recv;
4616 +
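
Note: the tls_sw.c refactor centralizes iterator rollback. Instead of a revert flag plus open-coded iov_iter_revert() calls in the send and receive paths, zerocopy_from_iter() now rewinds the iterator itself on failure, and only by the bytes it actually consumed; that is why the sendmsg fallback's separate revert arithmetic disappears above as pure deletion. The helper being relied on:

    /* iov_iter_revert(iter, bytes) pushes 'bytes' back into the
     * iterator, undoing prior consumption; reverting at the failure
     * site keeps the accounting next to the code that consumed it.
     */
    if (rc)
            iov_iter_revert(from, size - *size_used);
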
4617 +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
4618 +index 733ccf867972..214f9ef79a64 100644
4619 +--- a/net/wireless/nl80211.c
4620 ++++ b/net/wireless/nl80211.c
4621 +@@ -3699,6 +3699,7 @@ static bool ht_rateset_to_mask(struct ieee80211_supported_band *sband,
4622 + return false;
4623 +
4624 + /* check availability */
4625 ++ ridx = array_index_nospec(ridx, IEEE80211_HT_MCS_MASK_LEN);
4626 + if (sband->ht_cap.mcs.rx_mask[ridx] & rbit)
4627 + mcs[ridx] |= rbit;
4628 + else
4629 +@@ -10124,7 +10125,7 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
4630 + struct wireless_dev *wdev = dev->ieee80211_ptr;
4631 + s32 last, low, high;
4632 + u32 hyst;
4633 +- int i, n;
4634 ++ int i, n, low_index;
4635 + int err;
4636 +
4637 + /* RSSI reporting disabled? */
4638 +@@ -10161,10 +10162,19 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
4639 + if (last < wdev->cqm_config->rssi_thresholds[i])
4640 + break;
4641 +
4642 +- low = i > 0 ?
4643 +- (wdev->cqm_config->rssi_thresholds[i - 1] - hyst) : S32_MIN;
4644 +- high = i < n ?
4645 +- (wdev->cqm_config->rssi_thresholds[i] + hyst - 1) : S32_MAX;
4646 ++ low_index = i - 1;
4647 ++ if (low_index >= 0) {
4648 ++ low_index = array_index_nospec(low_index, n);
4649 ++ low = wdev->cqm_config->rssi_thresholds[low_index] - hyst;
4650 ++ } else {
4651 ++ low = S32_MIN;
4652 ++ }
4653 ++ if (i < n) {
4654 ++ i = array_index_nospec(i, n);
4655 ++ high = wdev->cqm_config->rssi_thresholds[i] + hyst - 1;
4656 ++ } else {
4657 ++ high = S32_MAX;
4658 ++ }
4659 +
4660 + return rdev_set_cqm_rssi_range_config(rdev, dev, low, high);
4661 + }
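
Note: both nl80211 hunks are Spectre-v1 hardening. ridx and the rssi_thresholds indices derive from netlink input, so after the architectural bounds check each index is clamped with array_index_nospec() before the array access. The canonical pattern, as a sketch:

    #include <linux/nospec.h>

    /* Bounds-check first, then sanitize the index under speculation:
     * on a mispredicted branch array_index_nospec() yields 0, so the
     * load below cannot reveal out-of-bounds memory.
     */
    if (idx >= ARRAY_SIZE(table))
            return -EINVAL;
    idx = array_index_nospec(idx, ARRAY_SIZE(table));
    val = table[idx];
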
4662 +diff --git a/net/wireless/reg.c b/net/wireless/reg.c
4663 +index 2f702adf2912..24cfa2776f50 100644
4664 +--- a/net/wireless/reg.c
4665 ++++ b/net/wireless/reg.c
4666 +@@ -2661,11 +2661,12 @@ static void reg_process_hint(struct regulatory_request *reg_request)
4667 + {
4668 + struct wiphy *wiphy = NULL;
4669 + enum reg_request_treatment treatment;
4670 ++ enum nl80211_reg_initiator initiator = reg_request->initiator;
4671 +
4672 + if (reg_request->wiphy_idx != WIPHY_IDX_INVALID)
4673 + wiphy = wiphy_idx_to_wiphy(reg_request->wiphy_idx);
4674 +
4675 +- switch (reg_request->initiator) {
4676 ++ switch (initiator) {
4677 + case NL80211_REGDOM_SET_BY_CORE:
4678 + treatment = reg_process_hint_core(reg_request);
4679 + break;
4680 +@@ -2683,7 +2684,7 @@ static void reg_process_hint(struct regulatory_request *reg_request)
4681 + treatment = reg_process_hint_country_ie(wiphy, reg_request);
4682 + break;
4683 + default:
4684 +- WARN(1, "invalid initiator %d\n", reg_request->initiator);
4685 ++ WARN(1, "invalid initiator %d\n", initiator);
4686 + goto out_free;
4687 + }
4688 +
4689 +@@ -2698,7 +2699,7 @@ static void reg_process_hint(struct regulatory_request *reg_request)
4690 + */
4691 + if (treatment == REG_REQ_ALREADY_SET && wiphy &&
4692 + wiphy->regulatory_flags & REGULATORY_STRICT_REG) {
4693 +- wiphy_update_regulatory(wiphy, reg_request->initiator);
4694 ++ wiphy_update_regulatory(wiphy, initiator);
4695 + wiphy_all_share_dfs_chan_state(wiphy);
4696 + reg_check_channels();
4697 + }
4698 +@@ -2867,6 +2868,7 @@ static int regulatory_hint_core(const char *alpha2)
4699 + request->alpha2[0] = alpha2[0];
4700 + request->alpha2[1] = alpha2[1];
4701 + request->initiator = NL80211_REGDOM_SET_BY_CORE;
4702 ++ request->wiphy_idx = WIPHY_IDX_INVALID;
4703 +
4704 + queue_regulatory_request(request);
4705 +
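
Note: two regulatory fixes. reg_process_hint() reads reg_request->initiator into a local because the per-initiator handlers may free or hand off the request, after which the WARN() and the REGULATORY_STRICT_REG path would have dereferenced freed memory; and regulatory_hint_core() explicitly sets wiphy_idx to WIPHY_IDX_INVALID so a zero-initialized core hint is never misread as belonging to wiphy index 0. Outline of the first fix:

    enum nl80211_reg_initiator initiator = reg_request->initiator;

    treatment = reg_process_hint_core(reg_request);
    /* reg_request may no longer be valid here; from this point only
     * the saved 'initiator' copy is safe to use, e.g. in WARN()
     * messages or wiphy_update_regulatory(wiphy, initiator).
     */
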
4706 +diff --git a/net/wireless/scan.c b/net/wireless/scan.c
4707 +index d36c3eb7b931..d0e7472dd9fd 100644
4708 +--- a/net/wireless/scan.c
4709 ++++ b/net/wireless/scan.c
4710 +@@ -1058,13 +1058,23 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
4711 + return NULL;
4712 + }
4713 +
4714 ++/*
4715 ++ * Update RX channel information based on the available frame payload
4716 ++ * information. This is mainly for the 2.4 GHz band where frames can be received
4717 ++ * from neighboring channels and the Beacon frames use the DSSS Parameter Set
4718 ++ * element to indicate the current (transmitting) channel, but this might also
4719 ++ * be needed on other bands if RX frequency does not match with the actual
4720 ++ * operating channel of a BSS.
4721 ++ */
4722 + static struct ieee80211_channel *
4723 + cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
4724 +- struct ieee80211_channel *channel)
4725 ++ struct ieee80211_channel *channel,
4726 ++ enum nl80211_bss_scan_width scan_width)
4727 + {
4728 + const u8 *tmp;
4729 + u32 freq;
4730 + int channel_number = -1;
4731 ++ struct ieee80211_channel *alt_channel;
4732 +
4733 + tmp = cfg80211_find_ie(WLAN_EID_DS_PARAMS, ie, ielen);
4734 + if (tmp && tmp[1] == 1) {
4735 +@@ -1078,16 +1088,45 @@ cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
4736 + }
4737 + }
4738 +
4739 +- if (channel_number < 0)
4740 ++ if (channel_number < 0) {
4741 ++ /* No channel information in frame payload */
4742 + return channel;
4743 ++ }
4744 +
4745 + freq = ieee80211_channel_to_frequency(channel_number, channel->band);
4746 +- channel = ieee80211_get_channel(wiphy, freq);
4747 +- if (!channel)
4748 +- return NULL;
4749 +- if (channel->flags & IEEE80211_CHAN_DISABLED)
4750 ++ alt_channel = ieee80211_get_channel(wiphy, freq);
4751 ++ if (!alt_channel) {
4752 ++ if (channel->band == NL80211_BAND_2GHZ) {
4753 ++ /*
4754 ++ * Better not allow unexpected channels when that could
4755 ++ * be going beyond the 1-11 range (e.g., discovering
4756 ++ * BSS on channel 12 when radio is configured for
4757 ++ * channel 11).
4758 ++ */
4759 ++ return NULL;
4760 ++ }
4761 ++
4762 ++ /* No match for the payload channel number - ignore it */
4763 ++ return channel;
4764 ++ }
4765 ++
4766 ++ if (scan_width == NL80211_BSS_CHAN_WIDTH_10 ||
4767 ++ scan_width == NL80211_BSS_CHAN_WIDTH_5) {
4768 ++ /*
4769 ++ * Ignore channel number in 5 and 10 MHz channels where there
4770 ++ * may not be an n:1 or 1:n mapping between frequencies and
4771 ++ * channel numbers.
4772 ++ */
4773 ++ return channel;
4774 ++ }
4775 ++
4776 ++ /*
4777 ++ * Use the channel determined through the payload channel number
4778 ++ * instead of the RX channel reported by the driver.
4779 ++ */
4780 ++ if (alt_channel->flags & IEEE80211_CHAN_DISABLED)
4781 + return NULL;
4782 +- return channel;
4783 ++ return alt_channel;
4784 + }
4785 +
4786 + /* Returned bss is reference counted and must be cleaned up appropriately. */
4787 +@@ -1112,7 +1151,8 @@ cfg80211_inform_bss_data(struct wiphy *wiphy,
4788 + (data->signal < 0 || data->signal > 100)))
4789 + return NULL;
4790 +
4791 +- channel = cfg80211_get_bss_channel(wiphy, ie, ielen, data->chan);
4792 ++ channel = cfg80211_get_bss_channel(wiphy, ie, ielen, data->chan,
4793 ++ data->scan_width);
4794 + if (!channel)
4795 + return NULL;
4796 +
4797 +@@ -1210,7 +1250,7 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
4798 + return NULL;
4799 +
4800 + channel = cfg80211_get_bss_channel(wiphy, mgmt->u.beacon.variable,
4801 +- ielen, data->chan);
4802 ++ ielen, data->chan, data->scan_width);
4803 + if (!channel)
4804 + return NULL;
4805 +
4806 +diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
4807 +index 352abca2605f..86f5afbd0a0c 100644
4808 +--- a/net/xfrm/xfrm_input.c
4809 ++++ b/net/xfrm/xfrm_input.c
4810 +@@ -453,6 +453,7 @@ resume:
4811 + XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
4812 + goto drop;
4813 + }
4814 ++ crypto_done = false;
4815 + } while (!err);
4816 +
4817 + err = xfrm_rcv_cb(skb, family, x->type->proto, 0);
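
Note: the xfrm_input.c one-liner resets crypto_done at the bottom of each pass through the decapsulation loop. The flag refers only to the first transform, whose processing may have been completed by crypto offload; leaving it set let later transforms of an SA bundle reuse that stale state instead of doing their own input processing. Shape of the loop:

    do {
            /* decapsulate one transform of the bundle here */
            crypto_done = false;    /* only the first iteration came
                                     * from the async crypto path */
    } while (!err);
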
4818 +diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
4819 +index 89b178a78dc7..36d15a38ce5e 100644
4820 +--- a/net/xfrm/xfrm_output.c
4821 ++++ b/net/xfrm/xfrm_output.c
4822 +@@ -101,6 +101,10 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
4823 + spin_unlock_bh(&x->lock);
4824 +
4825 + skb_dst_force(skb);
4826 ++ if (!skb_dst(skb)) {
4827 ++ XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
4828 ++ goto error_nolock;
4829 ++ }
4830 +
4831 + if (xfrm_offload(skb)) {
4832 + x->type_offload->encap(x, skb);
4833 +diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
4834 +index a94983e03a8b..526e6814ed4b 100644
4835 +--- a/net/xfrm/xfrm_policy.c
4836 ++++ b/net/xfrm/xfrm_policy.c
4837 +@@ -2551,6 +2551,10 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
4838 + }
4839 +
4840 + skb_dst_force(skb);
4841 ++ if (!skb_dst(skb)) {
4842 ++ XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
4843 ++ return 0;
4844 ++ }
4845 +
4846 + dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
4847 + if (IS_ERR(dst)) {
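
Note: the xfrm_output.c and xfrm_policy.c hunks add the same defensive check. In this stable series skb_dst_force() may clear the skb's dst entry rather than pin it when the refcount cannot be taken, so both call sites re-check skb_dst() and fail cleanly, bumping the matching LINUX_MIB_XFRMOUTERROR or LINUX_MIB_XFRMFWDHDRERROR counter, instead of dereferencing a NULL dst later. The pattern at both sites:

    skb_dst_force(skb);
    if (!skb_dst(skb)) {
            /* dst was dropped, not pinned; bail out before anything
             * downstream dereferences it
             */
            XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
            goto error_nolock;
    }
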
4848 +diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
4849 +index 33878e6e0d0a..d0672c400c2f 100644
4850 +--- a/net/xfrm/xfrm_user.c
4851 ++++ b/net/xfrm/xfrm_user.c
4852 +@@ -151,10 +151,16 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
4853 + err = -EINVAL;
4854 + switch (p->family) {
4855 + case AF_INET:
4856 ++ if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32)
4857 ++ goto out;
4858 ++
4859 + break;
4860 +
4861 + case AF_INET6:
4862 + #if IS_ENABLED(CONFIG_IPV6)
4863 ++ if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128)
4864 ++ goto out;
4865 ++
4866 + break;
4867 + #else
4868 + err = -EAFNOSUPPORT;
4869 +@@ -1359,10 +1365,16 @@ static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
4870 +
4871 + switch (p->sel.family) {
4872 + case AF_INET:
4873 ++ if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32)
4874 ++ return -EINVAL;
4875 ++
4876 + break;
4877 +
4878 + case AF_INET6:
4879 + #if IS_ENABLED(CONFIG_IPV6)
4880 ++ if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128)
4881 ++ return -EINVAL;
4882 ++
4883 + break;
4884 + #else
4885 + return -EAFNOSUPPORT;
4886 +@@ -1443,6 +1455,9 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
4887 + (ut[i].family != prev_family))
4888 + return -EINVAL;
4889 +
4890 ++ if (ut[i].mode >= XFRM_MODE_MAX)
4891 ++ return -EINVAL;
4892 ++
4893 + prev_family = ut[i].family;
4894 +
4895 + switch (ut[i].family) {
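
Note: the xfrm_user.c additions are input validation on netlink-supplied structures: selector prefix lengths are capped at the address width of the family, and template modes must be below XFRM_MODE_MAX before the value is trusted elsewhere in the state lookup path. The prefix bounds are simply the address sizes:

    /* An IPv4 address has 32 bits, an IPv6 address 128, so a longer
     * selector prefix length is malformed by construction.
     */
    case AF_INET:
            if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32)
                    return -EINVAL;
            break;
    case AF_INET6:
            if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128)
                    return -EINVAL;
            break;
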
4896 +diff --git a/tools/perf/Makefile b/tools/perf/Makefile
4897 +index 225454416ed5..7902a5681fc8 100644
4898 +--- a/tools/perf/Makefile
4899 ++++ b/tools/perf/Makefile
4900 +@@ -84,10 +84,10 @@ endif # has_clean
4901 + endif # MAKECMDGOALS
4902 +
4903 + #
4904 +-# The clean target is not really parallel, don't print the jobs info:
4905 ++# Explicitly disable parallelism for the clean target.
4906 + #
4907 + clean:
4908 +- $(make)
4909 ++ $(make) -j1
4910 +
4911 + #
4912 + # The build-test target is not really parallel, don't print the jobs info,
4913 +diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
4914 +index 22dbb6612b41..b70cce40ca97 100644
4915 +--- a/tools/perf/util/machine.c
4916 ++++ b/tools/perf/util/machine.c
4917 +@@ -2246,7 +2246,8 @@ static int append_inlines(struct callchain_cursor *cursor,
4918 + if (!symbol_conf.inline_name || !map || !sym)
4919 + return ret;
4920 +
4921 +- addr = map__rip_2objdump(map, ip);
4922 ++ addr = map__map_ip(map, ip);
4923 ++ addr = map__rip_2objdump(map, addr);
4924 +
4925 + inline_node = inlines__tree_find(&map->dso->inlined_nodes, addr);
4926 + if (!inline_node) {
4927 +@@ -2272,7 +2273,7 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
4928 + {
4929 + struct callchain_cursor *cursor = arg;
4930 + const char *srcline = NULL;
4931 +- u64 addr;
4932 ++ u64 addr = entry->ip;
4933 +
4934 + if (symbol_conf.hide_unresolved && entry->sym == NULL)
4935 + return 0;
4936 +@@ -2284,7 +2285,8 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
4937 + * Convert entry->ip from a virtual address to an offset in
4938 + * its corresponding binary.
4939 + */
4940 +- addr = map__map_ip(entry->map, entry->ip);
4941 ++ if (entry->map)
4942 ++ addr = map__map_ip(entry->map, entry->ip);
4943 +
4944 + srcline = callchain_srcline(entry->map, entry->sym, addr);
4945 + return callchain_cursor_append(cursor, entry->ip,
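
Note: both perf util/machine.c fixes concern address spaces in callchain processing. append_inlines() must first turn the sampled virtual address into a map-relative address with map__map_ip() before map__rip_2objdump() can produce the objdump-style address the inlines tree is keyed by, and unwind_entry() now performs that translation only when the entry actually has a map, falling back to the raw ip otherwise. Condensed:

    u64 addr = entry->ip;           /* fallback: unresolved entry */

    if (entry->map)
            addr = map__map_ip(entry->map, entry->ip);  /* vaddr -> map offset */
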
4946 +diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py
4947 +index 001be4f9d3b9..a5f9e236cc71 100644
4948 +--- a/tools/perf/util/setup.py
4949 ++++ b/tools/perf/util/setup.py
4950 +@@ -27,7 +27,7 @@ class install_lib(_install_lib):
4951 +
4952 + cflags = getenv('CFLAGS', '').split()
4953 + # switch off several checks (need to be at the end of cflags list)
4954 +-cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter' ]
4955 ++cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter', '-Wno-redundant-decls' ]
4956 + if cc != "clang":
4957 + cflags += ['-Wno-cast-function-type' ]
4958 +
4959 +diff --git a/tools/testing/selftests/net/fib-onlink-tests.sh b/tools/testing/selftests/net/fib-onlink-tests.sh
4960 +index 3991ad1a368d..864f865eee55 100755
4961 +--- a/tools/testing/selftests/net/fib-onlink-tests.sh
4962 ++++ b/tools/testing/selftests/net/fib-onlink-tests.sh
4963 +@@ -167,8 +167,8 @@ setup()
4964 + # add vrf table
4965 + ip li add ${VRF} type vrf table ${VRF_TABLE}
4966 + ip li set ${VRF} up
4967 +- ip ro add table ${VRF_TABLE} unreachable default
4968 +- ip -6 ro add table ${VRF_TABLE} unreachable default
4969 ++ ip ro add table ${VRF_TABLE} unreachable default metric 8192
4970 ++ ip -6 ro add table ${VRF_TABLE} unreachable default metric 8192
4971 +
4972 + # create test interfaces
4973 + ip li add ${NETIFS[p1]} type veth peer name ${NETIFS[p2]}
4974 +@@ -185,20 +185,20 @@ setup()
4975 + for n in 1 3 5 7; do
4976 + ip li set ${NETIFS[p${n}]} up
4977 + ip addr add ${V4ADDRS[p${n}]}/24 dev ${NETIFS[p${n}]}
4978 +- ip addr add ${V6ADDRS[p${n}]}/64 dev ${NETIFS[p${n}]}
4979 ++ ip addr add ${V6ADDRS[p${n}]}/64 dev ${NETIFS[p${n}]} nodad
4980 + done
4981 +
4982 + # move peer interfaces to namespace and add addresses
4983 + for n in 2 4 6 8; do
4984 + ip li set ${NETIFS[p${n}]} netns ${PEER_NS} up
4985 + ip -netns ${PEER_NS} addr add ${V4ADDRS[p${n}]}/24 dev ${NETIFS[p${n}]}
4986 +- ip -netns ${PEER_NS} addr add ${V6ADDRS[p${n}]}/64 dev ${NETIFS[p${n}]}
4987 ++ ip -netns ${PEER_NS} addr add ${V6ADDRS[p${n}]}/64 dev ${NETIFS[p${n}]} nodad
4988 + done
4989 +
4990 +- set +e
4991 ++ ip -6 ro add default via ${V6ADDRS[p3]/::[0-9]/::64}
4992 ++ ip -6 ro add table ${VRF_TABLE} default via ${V6ADDRS[p7]/::[0-9]/::64}
4993 +
4994 +- # let DAD complete - assume default of 1 probe
4995 +- sleep 1
4996 ++ set +e
4997 + }
4998 +
4999 + cleanup()
5000 +diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh
5001 +index 0d7a44fa30af..8e509cbcb209 100755
5002 +--- a/tools/testing/selftests/net/rtnetlink.sh
5003 ++++ b/tools/testing/selftests/net/rtnetlink.sh
5004 +@@ -1,4 +1,4 @@
5005 +-#!/bin/sh
5006 ++#!/bin/bash
5007 + #
5008 + # This test is for checking rtnetlink callpaths, and get as much coverage as possible.
5009 + #
5010 +diff --git a/tools/testing/selftests/net/udpgso_bench.sh b/tools/testing/selftests/net/udpgso_bench.sh
5011 +index 850767befa47..99e537ab5ad9 100755
5012 +--- a/tools/testing/selftests/net/udpgso_bench.sh
5013 ++++ b/tools/testing/selftests/net/udpgso_bench.sh
5014 +@@ -1,4 +1,4 @@
5015 +-#!/bin/sh
5016 ++#!/bin/bash
5017 + # SPDX-License-Identifier: GPL-2.0
5018 + #
5019 + # Run a series of udpgso benchmarks