Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Sat, 20 Apr 2019 23:26:39
Message-Id: 1555703473.0f24bbd911eccbda14a4813938f48fd974f5bdb2.mpagano@gentoo
commit: 0f24bbd911eccbda14a4813938f48fd974f5bdb2
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Apr 19 19:51:13 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Apr 19 19:51:13 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=0f24bbd9

Linux patch 4.19.35

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1034_linux-4.19.35.patch | 3735 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 3739 insertions(+)

diff --git a/0000_README b/0000_README
index e752acc..fbfea55 100644
--- a/0000_README
+++ b/0000_README
@@ -179,6 +179,10 @@ Patch: 1033_linux-4.19.34.patch
From: http://www.kernel.org
Desc: Linux 4.19.34

+Patch: 1034_linux-4.19.35.patch
+From: http://www.kernel.org
+Desc: Linux 4.19.35
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1034_linux-4.19.35.patch b/1034_linux-4.19.35.patch
new file mode 100644
index 0000000..4caf535
--- /dev/null
+++ b/1034_linux-4.19.35.patch
@@ -0,0 +1,3735 @@
+diff --git a/Makefile b/Makefile
+index 8fdfe0af5862..f4229975b48c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 34
++SUBLEVEL = 35
+ EXTRAVERSION =
+ NAME = "People's Front"
+
+@@ -483,7 +483,7 @@ endif
+ ifeq ($(cc-name),clang)
+ ifneq ($(CROSS_COMPILE),)
+ CLANG_FLAGS := --target=$(notdir $(CROSS_COMPILE:%-=%))
+-GCC_TOOLCHAIN_DIR := $(dir $(shell which $(LD)))
++GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit))
+ CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)
+ GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..)
+ endif
+diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts
+index c87d01297a01..20bbb899b3b7 100644
+--- a/arch/arm/boot/dts/am335x-evm.dts
++++ b/arch/arm/boot/dts/am335x-evm.dts
+@@ -57,6 +57,24 @@
+ enable-active-high;
+ };
+
++ /* TPS79501 */
++ v1_8d_reg: fixedregulator-v1_8d {
++ compatible = "regulator-fixed";
++ regulator-name = "v1_8d";
++ vin-supply = <&vbat>;
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <1800000>;
++ };
++
++ /* TPS79501 */
++ v3_3d_reg: fixedregulator-v3_3d {
++ compatible = "regulator-fixed";
++ regulator-name = "v3_3d";
++ vin-supply = <&vbat>;
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ };
++
+ matrix_keypad: matrix_keypad0 {
+ compatible = "gpio-matrix-keypad";
+ debounce-delay-ms = <5>;
+@@ -499,10 +517,10 @@
+ status = "okay";
+
+ /* Regulators */
+- AVDD-supply = <&vaux2_reg>;
+- IOVDD-supply = <&vaux2_reg>;
+- DRVDD-supply = <&vaux2_reg>;
+- DVDD-supply = <&vbat>;
++ AVDD-supply = <&v3_3d_reg>;
++ IOVDD-supply = <&v3_3d_reg>;
++ DRVDD-supply = <&v3_3d_reg>;
++ DVDD-supply = <&v1_8d_reg>;
+ };
+ };
+
+diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
+index bf1a40e45c97..ba589bc41a57 100644
+--- a/arch/arm/boot/dts/am335x-evmsk.dts
++++ b/arch/arm/boot/dts/am335x-evmsk.dts
+@@ -73,6 +73,24 @@
+ enable-active-high;
+ };
+
++ /* TPS79518 */
++ v1_8d_reg: fixedregulator-v1_8d {
++ compatible = "regulator-fixed";
++ regulator-name = "v1_8d";
++ vin-supply = <&vbat>;
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <1800000>;
++ };
++
++ /* TPS78633 */
++ v3_3d_reg: fixedregulator-v3_3d {
++ compatible = "regulator-fixed";
++ regulator-name = "v3_3d";
++ vin-supply = <&vbat>;
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ };
++
+ leds {
+ pinctrl-names = "default";
+ pinctrl-0 = <&user_leds_s0>;
+@@ -501,10 +519,10 @@
+ status = "okay";
+
+ /* Regulators */
+- AVDD-supply = <&vaux2_reg>;
+- IOVDD-supply = <&vaux2_reg>;
+- DRVDD-supply = <&vaux2_reg>;
+- DVDD-supply = <&vbat>;
++ AVDD-supply = <&v3_3d_reg>;
++ IOVDD-supply = <&v3_3d_reg>;
++ DRVDD-supply = <&v3_3d_reg>;
++ DVDD-supply = <&v1_8d_reg>;
+ };
+ };
+
+diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
+index 0840ffb3205c..e6a36a792bae 100644
+--- a/arch/arm/boot/dts/rk3288.dtsi
++++ b/arch/arm/boot/dts/rk3288.dtsi
+@@ -70,7 +70,7 @@
+ compatible = "arm,cortex-a12";
+ reg = <0x501>;
+ resets = <&cru SRST_CORE1>;
+- operating-points = <&cpu_opp_table>;
++ operating-points-v2 = <&cpu_opp_table>;
+ #cooling-cells = <2>; /* min followed by max */
+ clock-latency = <40000>;
+ clocks = <&cru ARMCLK>;
+@@ -80,7 +80,7 @@
+ compatible = "arm,cortex-a12";
+ reg = <0x502>;
+ resets = <&cru SRST_CORE2>;
+- operating-points = <&cpu_opp_table>;
++ operating-points-v2 = <&cpu_opp_table>;
+ #cooling-cells = <2>; /* min followed by max */
+ clock-latency = <40000>;
+ clocks = <&cru ARMCLK>;
+@@ -90,7 +90,7 @@
+ compatible = "arm,cortex-a12";
+ reg = <0x503>;
+ resets = <&cru SRST_CORE3>;
+- operating-points = <&cpu_opp_table>;
++ operating-points-v2 = <&cpu_opp_table>;
+ #cooling-cells = <2>; /* min followed by max */
+ clock-latency = <40000>;
+ clocks = <&cru ARMCLK>;
+diff --git a/arch/arm/boot/dts/sama5d2-pinfunc.h b/arch/arm/boot/dts/sama5d2-pinfunc.h
+index 1c01a6f843d8..28a2e45752fe 100644
+--- a/arch/arm/boot/dts/sama5d2-pinfunc.h
++++ b/arch/arm/boot/dts/sama5d2-pinfunc.h
+@@ -518,7 +518,7 @@
+ #define PIN_PC9__GPIO PINMUX_PIN(PIN_PC9, 0, 0)
+ #define PIN_PC9__FIQ PINMUX_PIN(PIN_PC9, 1, 3)
+ #define PIN_PC9__GTSUCOMP PINMUX_PIN(PIN_PC9, 2, 1)
+-#define PIN_PC9__ISC_D0 PINMUX_PIN(PIN_PC9, 2, 1)
++#define PIN_PC9__ISC_D0 PINMUX_PIN(PIN_PC9, 3, 1)
+ #define PIN_PC9__TIOA4 PINMUX_PIN(PIN_PC9, 4, 2)
+ #define PIN_PC10 74
+ #define PIN_PC10__GPIO PINMUX_PIN(PIN_PC10, 0, 0)
+diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
+index 5272e887a434..c142169a58fc 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
+@@ -45,8 +45,7 @@
+
+ vcc_host1_5v: vcc_otg_5v: vcc-host1-5v-regulator {
+ compatible = "regulator-fixed";
+- enable-active-high;
+- gpio = <&gpio0 RK_PD3 GPIO_ACTIVE_HIGH>;
++ gpio = <&gpio0 RK_PA2 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb20_host_drv>;
+ regulator-name = "vcc_host1_5v";
+@@ -238,7 +237,7 @@
+
+ usb2 {
+ usb20_host_drv: usb20-host-drv {
+- rockchip,pins = <0 RK_PD3 RK_FUNC_GPIO &pcfg_pull_none>;
++ rockchip,pins = <0 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+index 3f5a2944300f..e065394360bb 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+@@ -1356,11 +1356,11 @@
+
+ sdmmc0 {
+ sdmmc0_clk: sdmmc0-clk {
+- rockchip,pins = <1 RK_PA6 1 &pcfg_pull_none_4ma>;
++ rockchip,pins = <1 RK_PA6 1 &pcfg_pull_none_8ma>;
+ };
+
+ sdmmc0_cmd: sdmmc0-cmd {
+- rockchip,pins = <1 RK_PA4 1 &pcfg_pull_up_4ma>;
++ rockchip,pins = <1 RK_PA4 1 &pcfg_pull_up_8ma>;
+ };
+
+ sdmmc0_dectn: sdmmc0-dectn {
+@@ -1372,14 +1372,14 @@
+ };
+
+ sdmmc0_bus1: sdmmc0-bus1 {
+- rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_4ma>;
++ rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_8ma>;
+ };
+
+ sdmmc0_bus4: sdmmc0-bus4 {
+- rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_4ma>,
+- <1 RK_PA1 1 &pcfg_pull_up_4ma>,
+- <1 RK_PA2 1 &pcfg_pull_up_4ma>,
+- <1 RK_PA3 1 &pcfg_pull_up_4ma>;
++ rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_8ma>,
++ <1 RK_PA1 1 &pcfg_pull_up_8ma>,
++ <1 RK_PA2 1 &pcfg_pull_up_8ma>,
++ <1 RK_PA3 1 &pcfg_pull_up_8ma>;
+ };
+
+ sdmmc0_gpio: sdmmc0-gpio {
+@@ -1553,50 +1553,50 @@
+ rgmiim1_pins: rgmiim1-pins {
+ rockchip,pins =
+ /* mac_txclk */
+- <1 RK_PB4 2 &pcfg_pull_none_12ma>,
++ <1 RK_PB4 2 &pcfg_pull_none_8ma>,
+ /* mac_rxclk */
+- <1 RK_PB5 2 &pcfg_pull_none_2ma>,
++ <1 RK_PB5 2 &pcfg_pull_none_4ma>,
+ /* mac_mdio */
+- <1 RK_PC3 2 &pcfg_pull_none_2ma>,
++ <1 RK_PC3 2 &pcfg_pull_none_4ma>,
+ /* mac_txen */
+- <1 RK_PD1 2 &pcfg_pull_none_12ma>,
++ <1 RK_PD1 2 &pcfg_pull_none_8ma>,
+ /* mac_clk */
+- <1 RK_PC5 2 &pcfg_pull_none_2ma>,
++ <1 RK_PC5 2 &pcfg_pull_none_4ma>,
+ /* mac_rxdv */
+- <1 RK_PC6 2 &pcfg_pull_none_2ma>,
++ <1 RK_PC6 2 &pcfg_pull_none_4ma>,
+ /* mac_mdc */
+- <1 RK_PC7 2 &pcfg_pull_none_2ma>,
++ <1 RK_PC7 2 &pcfg_pull_none_4ma>,
+ /* mac_rxd1 */
+- <1 RK_PB2 2 &pcfg_pull_none_2ma>,
++ <1 RK_PB2 2 &pcfg_pull_none_4ma>,
+ /* mac_rxd0 */
+- <1 RK_PB3 2 &pcfg_pull_none_2ma>,
++ <1 RK_PB3 2 &pcfg_pull_none_4ma>,
+ /* mac_txd1 */
+- <1 RK_PB0 2 &pcfg_pull_none_12ma>,
++ <1 RK_PB0 2 &pcfg_pull_none_8ma>,
+ /* mac_txd0 */
+- <1 RK_PB1 2 &pcfg_pull_none_12ma>,
++ <1 RK_PB1 2 &pcfg_pull_none_8ma>,
+ /* mac_rxd3 */
+- <1 RK_PB6 2 &pcfg_pull_none_2ma>,
++ <1 RK_PB6 2 &pcfg_pull_none_4ma>,
+ /* mac_rxd2 */
+- <1 RK_PB7 2 &pcfg_pull_none_2ma>,
++ <1 RK_PB7 2 &pcfg_pull_none_4ma>,
+ /* mac_txd3 */
+- <1 RK_PC0 2 &pcfg_pull_none_12ma>,
++ <1 RK_PC0 2 &pcfg_pull_none_8ma>,
+ /* mac_txd2 */
+- <1 RK_PC1 2 &pcfg_pull_none_12ma>,
++ <1 RK_PC1 2 &pcfg_pull_none_8ma>,
+
+ /* mac_txclk */
+- <0 RK_PB0 1 &pcfg_pull_none>,
++ <0 RK_PB0 1 &pcfg_pull_none_8ma>,
+ /* mac_txen */
+- <0 RK_PB4 1 &pcfg_pull_none>,
++ <0 RK_PB4 1 &pcfg_pull_none_8ma>,
+ /* mac_clk */
+- <0 RK_PD0 1 &pcfg_pull_none>,
++ <0 RK_PD0 1 &pcfg_pull_none_4ma>,
+ /* mac_txd1 */
+- <0 RK_PC0 1 &pcfg_pull_none>,
++ <0 RK_PC0 1 &pcfg_pull_none_8ma>,
+ /* mac_txd0 */
+- <0 RK_PC1 1 &pcfg_pull_none>,
++ <0 RK_PC1 1 &pcfg_pull_none_8ma>,
+ /* mac_txd3 */
+- <0 RK_PC7 1 &pcfg_pull_none>,
++ <0 RK_PC7 1 &pcfg_pull_none_8ma>,
+ /* mac_txd2 */
+- <0 RK_PC6 1 &pcfg_pull_none>;
++ <0 RK_PC6 1 &pcfg_pull_none_8ma>;
+ };
+
+ rmiim1_pins: rmiim1-pins {
+diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
+index 07fe2479d310..b447b4db423a 100644
+--- a/arch/arm64/include/asm/futex.h
++++ b/arch/arm64/include/asm/futex.h
+@@ -30,8 +30,8 @@ do { \
+ " prfm pstl1strm, %2\n" \
+ "1: ldxr %w1, %2\n" \
+ insn "\n" \
+-"2: stlxr %w3, %w0, %2\n" \
+-" cbnz %w3, 1b\n" \
++"2: stlxr %w0, %w3, %2\n" \
++" cbnz %w0, 1b\n" \
+ " dmb ish\n" \
+ "3:\n" \
+ " .pushsection .fixup,\"ax\"\n" \
+@@ -50,30 +50,30 @@ do { \
+ static inline int
+ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr)
+ {
+- int oldval = 0, ret, tmp;
++ int oldval, ret, tmp;
+ u32 __user *uaddr = __uaccess_mask_ptr(_uaddr);
+
+ pagefault_disable();
+
+ switch (op) {
+ case FUTEX_OP_SET:
+- __futex_atomic_op("mov %w0, %w4",
++ __futex_atomic_op("mov %w3, %w4",
+ ret, oldval, uaddr, tmp, oparg);
+ break;
+ case FUTEX_OP_ADD:
+- __futex_atomic_op("add %w0, %w1, %w4",
++ __futex_atomic_op("add %w3, %w1, %w4",
+ ret, oldval, uaddr, tmp, oparg);
+ break;
+ case FUTEX_OP_OR:
+- __futex_atomic_op("orr %w0, %w1, %w4",
++ __futex_atomic_op("orr %w3, %w1, %w4",
+ ret, oldval, uaddr, tmp, oparg);
+ break;
+ case FUTEX_OP_ANDN:
+- __futex_atomic_op("and %w0, %w1, %w4",
++ __futex_atomic_op("and %w3, %w1, %w4",
+ ret, oldval, uaddr, tmp, ~oparg);
+ break;
+ case FUTEX_OP_XOR:
+- __futex_atomic_op("eor %w0, %w1, %w4",
++ __futex_atomic_op("eor %w3, %w1, %w4",
+ ret, oldval, uaddr, tmp, oparg);
+ break;
+ default:
+diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
+index b9da093e0341..a0099be4311a 100644
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -101,10 +101,16 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
+ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
+ {
+ struct stackframe frame;
+- int skip;
++ int skip = 0;
+
+ pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
+
++ if (regs) {
++ if (user_mode(regs))
++ return;
++ skip = 1;
++ }
++
+ if (!tsk)
+ tsk = current;
+
+@@ -125,7 +131,6 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
+ frame.graph = tsk->curr_ret_stack;
+ #endif
+
+- skip = !!regs;
+ printk("Call trace:\n");
+ do {
+ /* skip until specified stack frame */
+@@ -175,15 +180,13 @@ static int __die(const char *str, int err, struct pt_regs *regs)
+ return ret;
+
+ print_modules();
+- __show_regs(regs);
+ pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
+ TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk),
+ end_of_stack(tsk));
++ show_regs(regs);
+
+- if (!user_mode(regs)) {
+- dump_backtrace(regs, tsk);
++ if (!user_mode(regs))
+ dump_instr(KERN_EMERG, regs);
+- }
+
+ return ret;
+ }
+diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
+index 787e27964ab9..774c3e17c798 100644
+--- a/arch/arm64/mm/init.c
++++ b/arch/arm64/mm/init.c
+@@ -450,7 +450,7 @@ void __init arm64_memblock_init(void)
+ * memory spans, randomize the linear region as well.
+ */
+ if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) {
+- range = range / ARM64_MEMSTART_ALIGN + 1;
++ range /= ARM64_MEMSTART_ALIGN;
+ memstart_addr -= ARM64_MEMSTART_ALIGN *
+ ((range * memstart_offset_seed) >> 16);
+ }
+diff --git a/arch/parisc/include/asm/ptrace.h b/arch/parisc/include/asm/ptrace.h
+index 2a27b275ab09..9ff033d261ab 100644
+--- a/arch/parisc/include/asm/ptrace.h
++++ b/arch/parisc/include/asm/ptrace.h
+@@ -22,13 +22,14 @@ unsigned long profile_pc(struct pt_regs *);
+
+ static inline unsigned long regs_return_value(struct pt_regs *regs)
+ {
+- return regs->gr[20];
++ return regs->gr[28];
+ }
+
+ static inline void instruction_pointer_set(struct pt_regs *regs,
+ unsigned long val)
+ {
+- regs->iaoq[0] = val;
++ regs->iaoq[0] = val;
++ regs->iaoq[1] = val + 4;
+ }
+
+ /* Query offset/name of register from its name/offset */
+diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
+index eb39e7e380d7..841db71958cd 100644
+--- a/arch/parisc/kernel/process.c
++++ b/arch/parisc/kernel/process.c
+@@ -210,12 +210,6 @@ void __cpuidle arch_cpu_idle(void)
+
+ static int __init parisc_idle_init(void)
+ {
+- const char *marker;
+-
+- /* check QEMU/SeaBIOS marker in PAGE0 */
+- marker = (char *) &PAGE0->pad0;
+- running_on_qemu = (memcmp(marker, "SeaBIOS", 8) == 0);
+-
+ if (!running_on_qemu)
+ cpu_idle_poll_ctrl(1);
+
+diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
+index 4e87c35c22b7..79c8b994e7d1 100644
+--- a/arch/parisc/kernel/setup.c
++++ b/arch/parisc/kernel/setup.c
+@@ -399,6 +399,9 @@ void __init start_parisc(void)
+ int ret, cpunum;
+ struct pdc_coproc_cfg coproc_cfg;
+
++ /* check QEMU/SeaBIOS marker in PAGE0 */
++ running_on_qemu = (memcmp(&PAGE0->pad0, "SeaBIOS", 8) == 0);
++
+ cpunum = smp_processor_id();
+
+ init_cpu_topology();
+diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
+index bbd1c73243d7..14b0f5b6a373 100644
+--- a/arch/powerpc/kernel/signal_64.c
++++ b/arch/powerpc/kernel/signal_64.c
+@@ -755,12 +755,25 @@ SYSCALL_DEFINE0(rt_sigreturn)
+ if (restore_tm_sigcontexts(current, &uc->uc_mcontext,
+ &uc_transact->uc_mcontext))
+ goto badframe;
+- }
+- else
+- /* Fall through, for non-TM restore */
++ } else
+ #endif
+- if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
+- goto badframe;
++ {
++ /*
++ * Fall through, for non-TM restore
++ *
++ * Unset MSR[TS] on the thread regs since MSR from user
++ * context does not have MSR active, and recheckpoint was
++ * not called since restore_tm_sigcontexts() was not called
++ * also.
++ *
++ * If not unsetting it, the code can RFID to userspace with
++ * MSR[TS] set, but without CPU in the proper state,
++ * causing a TM bad thing.
++ */
++ current->thread.regs->msr &= ~MSR_TS_MASK;
++ if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
++ goto badframe;
++ }
+
+ if (restore_altstack(&uc->uc_stack))
+ goto badframe;
+diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h
+index 8d25f8904c00..1dcde0fda435 100644
+--- a/arch/riscv/include/asm/syscall.h
++++ b/arch/riscv/include/asm/syscall.h
+@@ -78,10 +78,11 @@ static inline void syscall_get_arguments(struct task_struct *task,
+ if (i == 0) {
+ args[0] = regs->orig_a0;
+ args++;
+- i++;
+ n--;
++ } else {
++ i--;
+ }
+- memcpy(args, &regs->a1 + i * sizeof(regs->a1), n * sizeof(args[0]));
++ memcpy(args, &regs->a1 + i, n * sizeof(args[0]));
+ }
+
+ static inline void syscall_set_arguments(struct task_struct *task,
+@@ -93,10 +94,11 @@ static inline void syscall_set_arguments(struct task_struct *task,
+ if (i == 0) {
+ regs->orig_a0 = args[0];
+ args++;
+- i++;
+ n--;
+- }
+- memcpy(&regs->a1 + i * sizeof(regs->a1), args, n * sizeof(regs->a0));
++ } else {
++ i--;
++ }
++ memcpy(&regs->a1 + i, args, n * sizeof(regs->a1));
+ }
+
+ #endif /* _ASM_RISCV_SYSCALL_H */
+diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
+index c3d7ccd25381..5bfe2243a08f 100644
+--- a/arch/x86/entry/vdso/Makefile
++++ b/arch/x86/entry/vdso/Makefile
+@@ -47,7 +47,7 @@ targets += $(vdso_img_sodbg) $(vdso_img-y:%=vdso%.so)
+ CPPFLAGS_vdso.lds += -P -C
+
+ VDSO_LDFLAGS_vdso.lds = -m elf_x86_64 -soname linux-vdso.so.1 --no-undefined \
+- -z max-page-size=4096 -z common-page-size=4096
++ -z max-page-size=4096
+
+ $(obj)/vdso64.so.dbg: $(obj)/vdso.lds $(vobjs) FORCE
+ $(call if_changed,vdso)
+@@ -98,7 +98,7 @@ CFLAGS_REMOVE_vvar.o = -pg
+
+ CPPFLAGS_vdsox32.lds = $(CPPFLAGS_vdso.lds)
+ VDSO_LDFLAGS_vdsox32.lds = -m elf32_x86_64 -soname linux-vdso.so.1 \
+- -z max-page-size=4096 -z common-page-size=4096
++ -z max-page-size=4096
+
+ # x32-rebranded versions
+ vobjx32s-y := $(vobjs-y:.o=-x32.o)
+diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
+index c84584bb9402..3e5dd85b019a 100644
+--- a/arch/x86/events/amd/core.c
++++ b/arch/x86/events/amd/core.c
+@@ -3,10 +3,14 @@
+ #include <linux/types.h>
+ #include <linux/init.h>
+ #include <linux/slab.h>
++#include <linux/delay.h>
+ #include <asm/apicdef.h>
++#include <asm/nmi.h>
+
+ #include "../perf_event.h"
+
++static DEFINE_PER_CPU(unsigned int, perf_nmi_counter);
++
+ static __initconst const u64 amd_hw_cache_event_ids
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+@@ -429,6 +433,132 @@ static void amd_pmu_cpu_dead(int cpu)
+ }
+ }
+
++/*
++ * When a PMC counter overflows, an NMI is used to process the event and
++ * reset the counter. NMI latency can result in the counter being updated
++ * before the NMI can run, which can result in what appear to be spurious
++ * NMIs. This function is intended to wait for the NMI to run and reset
++ * the counter to avoid possible unhandled NMI messages.
++ */
++#define OVERFLOW_WAIT_COUNT 50
++
++static void amd_pmu_wait_on_overflow(int idx)
++{
++ unsigned int i;
++ u64 counter;
++
++ /*
++ * Wait for the counter to be reset if it has overflowed. This loop
++ * should exit very, very quickly, but just in case, don't wait
++ * forever...
++ */
++ for (i = 0; i < OVERFLOW_WAIT_COUNT; i++) {
++ rdmsrl(x86_pmu_event_addr(idx), counter);
++ if (counter & (1ULL << (x86_pmu.cntval_bits - 1)))
++ break;
++
++ /* Might be in IRQ context, so can't sleep */
++ udelay(1);
++ }
++}
++
++static void amd_pmu_disable_all(void)
++{
++ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
++ int idx;
++
++ x86_pmu_disable_all();
++
++ /*
++ * This shouldn't be called from NMI context, but add a safeguard here
++ * to return, since if we're in NMI context we can't wait for an NMI
++ * to reset an overflowed counter value.
++ */
++ if (in_nmi())
++ return;
++
++ /*
++ * Check each counter for overflow and wait for it to be reset by the
++ * NMI if it has overflowed. This relies on the fact that all active
++ * counters are always enabled when this function is called and
++ * ARCH_PERFMON_EVENTSEL_INT is always set.
++ */
++ for (idx = 0; idx < x86_pmu.num_counters; idx++) {
++ if (!test_bit(idx, cpuc->active_mask))
++ continue;
++
++ amd_pmu_wait_on_overflow(idx);
++ }
++}
++
++static void amd_pmu_disable_event(struct perf_event *event)
++{
++ x86_pmu_disable_event(event);
++
++ /*
++ * This can be called from NMI context (via x86_pmu_stop). The counter
++ * may have overflowed, but either way, we'll never see it get reset
++ * by the NMI if we're already in the NMI. And the NMI latency support
++ * below will take care of any pending NMI that might have been
++ * generated by the overflow.
++ */
++ if (in_nmi())
++ return;
++
++ amd_pmu_wait_on_overflow(event->hw.idx);
++}
++
++/*
++ * Because of NMI latency, if multiple PMC counters are active or other sources
++ * of NMIs are received, the perf NMI handler can handle one or more overflowed
++ * PMC counters outside of the NMI associated with the PMC overflow. If the NMI
++ * doesn't arrive at the LAPIC in time to become a pending NMI, then the kernel
++ * back-to-back NMI support won't be active. This PMC handler needs to take into
++ * account that this can occur, otherwise this could result in unknown NMI
++ * messages being issued. Examples of this is PMC overflow while in the NMI
++ * handler when multiple PMCs are active or PMC overflow while handling some
++ * other source of an NMI.
++ *
++ * Attempt to mitigate this by using the number of active PMCs to determine
++ * whether to return NMI_HANDLED if the perf NMI handler did not handle/reset
++ * any PMCs. The per-CPU perf_nmi_counter variable is set to a minimum of the
++ * number of active PMCs or 2. The value of 2 is used in case an NMI does not
++ * arrive at the LAPIC in time to be collapsed into an already pending NMI.
++ */
++static int amd_pmu_handle_irq(struct pt_regs *regs)
++{
++ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
++ int active, handled;
++
++ /*
++ * Obtain the active count before calling x86_pmu_handle_irq() since
++ * it is possible that x86_pmu_handle_irq() may make a counter
++ * inactive (through x86_pmu_stop).
++ */
++ active = __bitmap_weight(cpuc->active_mask, X86_PMC_IDX_MAX);
++
++ /* Process any counter overflows */
++ handled = x86_pmu_handle_irq(regs);
++
++ /*
++ * If a counter was handled, record the number of possible remaining
++ * NMIs that can occur.
++ */
++ if (handled) {
++ this_cpu_write(perf_nmi_counter,
++ min_t(unsigned int, 2, active));
++
++ return handled;
++ }
++
++ if (!this_cpu_read(perf_nmi_counter))
++ return NMI_DONE;
++
++ this_cpu_dec(perf_nmi_counter);
++
++ return NMI_HANDLED;
++}
++
+ static struct event_constraint *
+ amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+ struct perf_event *event)
+@@ -621,11 +751,11 @@ static ssize_t amd_event_sysfs_show(char *page, u64 config)
+
+ static __initconst const struct x86_pmu amd_pmu = {
+ .name = "AMD",
+- .handle_irq = x86_pmu_handle_irq,
+- .disable_all = x86_pmu_disable_all,
++ .handle_irq = amd_pmu_handle_irq,
++ .disable_all = amd_pmu_disable_all,
+ .enable_all = x86_pmu_enable_all,
+ .enable = x86_pmu_enable_event,
+- .disable = x86_pmu_disable_event,
++ .disable = amd_pmu_disable_event,
+ .hw_config = amd_pmu_hw_config,
+ .schedule_events = x86_schedule_events,
+ .eventsel = MSR_K7_EVNTSEL0,
+@@ -728,7 +858,7 @@ void amd_pmu_enable_virt(void)
+ cpuc->perf_ctr_virt_mask = 0;
+
+ /* Reload all events */
+- x86_pmu_disable_all();
++ amd_pmu_disable_all();
+ x86_pmu_enable_all(0);
+ }
+ EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);
+@@ -746,7 +876,7 @@ void amd_pmu_disable_virt(void)
+ cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
+
+ /* Reload all events */
+- x86_pmu_disable_all();
++ amd_pmu_disable_all();
+ x86_pmu_enable_all(0);
+ }
+ EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);
+diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
+index a41554350893..c9625bff4328 100644
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -1328,8 +1328,9 @@ void x86_pmu_stop(struct perf_event *event, int flags)
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+
+- if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
++ if (test_bit(hwc->idx, cpuc->active_mask)) {
+ x86_pmu.disable(event);
++ __clear_bit(hwc->idx, cpuc->active_mask);
+ cpuc->events[hwc->idx] = NULL;
+ WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
+ hwc->state |= PERF_HES_STOPPED;
+@@ -1426,16 +1427,8 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
+ apic_write(APIC_LVTPC, APIC_DM_NMI);
+
+ for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+- if (!test_bit(idx, cpuc->active_mask)) {
+- /*
+- * Though we deactivated the counter some cpus
+- * might still deliver spurious interrupts still
+- * in flight. Catch them:
+- */
+- if (__test_and_clear_bit(idx, cpuc->running))
+- handled++;
++ if (!test_bit(idx, cpuc->active_mask))
+ continue;
+- }
+
+ event = cpuc->events[idx];
+
+diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
+index 9f645ba57dbb..33611a74bfff 100644
+--- a/arch/x86/include/asm/bitops.h
++++ b/arch/x86/include/asm/bitops.h
+@@ -36,22 +36,17 @@
+ * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
+ */
+
+-#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
+-/* Technically wrong, but this avoids compilation errors on some gcc
+- versions. */
+-#define BITOP_ADDR(x) "=m" (*(volatile long *) (x))
+-#else
+-#define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
+-#endif
++#define RLONG_ADDR(x) "m" (*(volatile long *) (x))
++#define WBYTE_ADDR(x) "+m" (*(volatile char *) (x))
+
+-#define ADDR BITOP_ADDR(addr)
++#define ADDR RLONG_ADDR(addr)
+
+ /*
+ * We do the locked ops that don't return the old value as
+ * a mask operation on a byte.
+ */
+ #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
+-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
++#define CONST_MASK_ADDR(nr, addr) WBYTE_ADDR((void *)(addr) + ((nr)>>3))
+ #define CONST_MASK(nr) (1 << ((nr) & 7))
+
+ /**
+@@ -79,7 +74,7 @@ set_bit(long nr, volatile unsigned long *addr)
+ : "memory");
+ } else {
+ asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0"
+- : BITOP_ADDR(addr) : "Ir" (nr) : "memory");
++ : : RLONG_ADDR(addr), "Ir" (nr) : "memory");
+ }
+ }
+
+@@ -94,7 +89,7 @@ set_bit(long nr, volatile unsigned long *addr)
+ */
+ static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
+ {
+- asm volatile(__ASM_SIZE(bts) " %1,%0" : ADDR : "Ir" (nr) : "memory");
++ asm volatile(__ASM_SIZE(bts) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
+ }
+
+ /**
+@@ -116,8 +111,7 @@ clear_bit(long nr, volatile unsigned long *addr)
+ : "iq" ((u8)~CONST_MASK(nr)));
+ } else {
+ asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0"
+- : BITOP_ADDR(addr)
+- : "Ir" (nr));
++ : : RLONG_ADDR(addr), "Ir" (nr) : "memory");
+ }
+ }
+
+@@ -137,7 +131,7 @@ static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *ad
+
+ static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
+ {
+- asm volatile(__ASM_SIZE(btr) " %1,%0" : ADDR : "Ir" (nr));
++ asm volatile(__ASM_SIZE(btr) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
+ }
+
+ static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile
+@@ -145,7 +139,7 @@ static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile
+ bool negative;
+ asm volatile(LOCK_PREFIX "andb %2,%1"
+ CC_SET(s)
+- : CC_OUT(s) (negative), ADDR
++ : CC_OUT(s) (negative), WBYTE_ADDR(addr)
+ : "ir" ((char) ~(1 << nr)) : "memory");
+ return negative;
+ }
+@@ -161,13 +155,9 @@ static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile
+ * __clear_bit() is non-atomic and implies release semantics before the memory
+ * operation. It can be used for an unlock if no other CPUs can concurrently
+ * modify other bits in the word.
+- *
+- * No memory barrier is required here, because x86 cannot reorder stores past
+- * older loads. Same principle as spin_unlock.
+ */
+ static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
+ {
+- barrier();
+ __clear_bit(nr, addr);
+ }
+
+@@ -182,7 +172,7 @@ static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *
+ */
+ static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
+ {
+- asm volatile(__ASM_SIZE(btc) " %1,%0" : ADDR : "Ir" (nr));
++ asm volatile(__ASM_SIZE(btc) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
+ }
+
+ /**
+@@ -202,8 +192,7 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr)
+ : "iq" ((u8)CONST_MASK(nr)));
+ } else {
+ asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0"
+- : BITOP_ADDR(addr)
+- : "Ir" (nr));
++ : : RLONG_ADDR(addr), "Ir" (nr) : "memory");
+ }
+ }
+
+@@ -249,8 +238,8 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *
+
+ asm(__ASM_SIZE(bts) " %2,%1"
+ CC_SET(c)
+- : CC_OUT(c) (oldbit), ADDR
+- : "Ir" (nr));
++ : CC_OUT(c) (oldbit)
++ : ADDR, "Ir" (nr) : "memory");
+ return oldbit;
+ }
+
+@@ -290,8 +279,8 @@ static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long
+
+ asm volatile(__ASM_SIZE(btr) " %2,%1"
+ CC_SET(c)
+- : CC_OUT(c) (oldbit), ADDR
+- : "Ir" (nr));
++ : CC_OUT(c) (oldbit)
++ : ADDR, "Ir" (nr) : "memory");
+ return oldbit;
+ }
+
+@@ -302,8 +291,8 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon
+
+ asm volatile(__ASM_SIZE(btc) " %2,%1"
+ CC_SET(c)
+- : CC_OUT(c) (oldbit), ADDR
+- : "Ir" (nr) : "memory");
++ : CC_OUT(c) (oldbit)
++ : ADDR, "Ir" (nr) : "memory");
+
+ return oldbit;
+ }
+@@ -335,7 +324,7 @@ static __always_inline bool variable_test_bit(long nr, volatile const unsigned l
+ asm volatile(__ASM_SIZE(bt) " %2,%1"
+ CC_SET(c)
+ : CC_OUT(c) (oldbit)
+- : "m" (*(unsigned long *)addr), "Ir" (nr));
++ : "m" (*(unsigned long *)addr), "Ir" (nr) : "memory");
+
+ return oldbit;
+ }
+diff --git a/arch/x86/include/asm/string_32.h b/arch/x86/include/asm/string_32.h
+index 55d392c6bd29..2fd165f1cffa 100644
+--- a/arch/x86/include/asm/string_32.h
++++ b/arch/x86/include/asm/string_32.h
+@@ -179,14 +179,7 @@ static inline void *__memcpy3d(void *to, const void *from, size_t len)
+ * No 3D Now!
+ */
+
+-#if (__GNUC__ >= 4)
+ #define memcpy(t, f, n) __builtin_memcpy(t, f, n)
+-#else
+-#define memcpy(t, f, n) \
+- (__builtin_constant_p((n)) \
+- ? __constant_memcpy((t), (f), (n)) \
+- : __memcpy((t), (f), (n)))
+-#endif
+
+ #endif
+ #endif /* !CONFIG_FORTIFY_SOURCE */
+@@ -282,12 +275,7 @@ void *__constant_c_and_count_memset(void *s, unsigned long pattern,
+
+ {
+ int d0, d1;
+-#if __GNUC__ == 4 && __GNUC_MINOR__ == 0
+- /* Workaround for broken gcc 4.0 */
+- register unsigned long eax asm("%eax") = pattern;
+-#else
+ unsigned long eax = pattern;
+-#endif
+
+ switch (count % 4) {
+ case 0:
+@@ -321,15 +309,7 @@ void *__constant_c_and_count_memset(void *s, unsigned long pattern,
+ #define __HAVE_ARCH_MEMSET
+ extern void *memset(void *, int, size_t);
+ #ifndef CONFIG_FORTIFY_SOURCE
+-#if (__GNUC__ >= 4)
+ #define memset(s, c, count) __builtin_memset(s, c, count)
+-#else
+-#define memset(s, c, count) \
+- (__builtin_constant_p(c) \
+- ? __constant_c_x_memset((s), (0x01010101UL * (unsigned char)(c)), \
+- (count)) \
+- : __memset((s), (c), (count)))
+-#endif
+ #endif /* !CONFIG_FORTIFY_SOURCE */
+
+ #define __HAVE_ARCH_MEMSET16
+diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h
+index d33f92b9fa22..052a7a4ac025 100644
+--- a/arch/x86/include/asm/string_64.h
++++ b/arch/x86/include/asm/string_64.h
+@@ -32,21 +32,6 @@ static __always_inline void *__inline_memcpy(void *to, const void *from, size_t
+ extern void *memcpy(void *to, const void *from, size_t len);
+ extern void *__memcpy(void *to, const void *from, size_t len);
+
+-#ifndef CONFIG_FORTIFY_SOURCE
+-#if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4
+-#define memcpy(dst, src, len) \
+-({ \
+- size_t __len = (len); \
+- void *__ret; \
+- if (__builtin_constant_p(len) && __len >= 64) \
+- __ret = __memcpy((dst), (src), __len); \
+- else \
+- __ret = __builtin_memcpy((dst), (src), __len); \
+- __ret; \
+-})
+-#endif
+-#endif /* !CONFIG_FORTIFY_SOURCE */
+-
+ #define __HAVE_ARCH_MEMSET
+ void *memset(void *s, int c, size_t n);
+ void *__memset(void *s, int c, size_t n);
+diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
+index ef05bea7010d..6b5c710846f5 100644
+--- a/arch/x86/include/asm/xen/hypercall.h
++++ b/arch/x86/include/asm/xen/hypercall.h
+@@ -206,6 +206,9 @@ xen_single_call(unsigned int call,
+ __HYPERCALL_DECLS;
+ __HYPERCALL_5ARG(a1, a2, a3, a4, a5);
+
++ if (call >= PAGE_SIZE / sizeof(hypercall_page[0]))
++ return -EINVAL;
++
+ asm volatile(CALL_NOSPEC
+ : __HYPERCALL_5PARAM
+ : [thunk_target] "a" (&hypercall_page[call])
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index b47541962012..6dc72804fe6e 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -6398,11 +6398,11 @@ e_free:
+ return ret;
+ }
+
+-static int get_num_contig_pages(int idx, struct page **inpages,
+- unsigned long npages)
++static unsigned long get_num_contig_pages(unsigned long idx,
++ struct page **inpages, unsigned long npages)
+ {
+ unsigned long paddr, next_paddr;
+- int i = idx + 1, pages = 1;
++ unsigned long i = idx + 1, pages = 1;
+
+ /* find the number of contiguous pages starting from idx */
+ paddr = __sme_page_pa(inpages[idx]);
+@@ -6421,12 +6421,12 @@ static int get_num_contig_pages(int idx, struct page **inpages,
+
+ static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ {
+- unsigned long vaddr, vaddr_end, next_vaddr, npages, size;
++ unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
+ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct kvm_sev_launch_update_data params;
+ struct sev_data_launch_update_data *data;
+ struct page **inpages;
+- int i, ret, pages;
++ int ret;
+
+ if (!sev_guest(kvm))
+ return -ENOTTY;
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index f99f59625da5..6b6bcafd1d2c 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -11582,6 +11582,17 @@ static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu,
+ return 0;
+ }
+
++static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap) {
++ int msr;
++
++ for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
++ unsigned word = msr / BITS_PER_LONG;
++
++ msr_bitmap[word] = ~0;
++ msr_bitmap[word + (0x800 / sizeof(long))] = ~0;
++ }
++}
++
+ /*
+ * Merge L0's and L1's MSR bitmap, return false to indicate that
+ * we do not use the hardware.
+@@ -11623,39 +11634,44 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
+ return false;
+
+ msr_bitmap_l1 = (unsigned long *)kmap(page);
+- if (nested_cpu_has_apic_reg_virt(vmcs12)) {
+- /*
+- * L0 need not intercept reads for MSRs between 0x800 and 0x8ff, it
+- * just lets the processor take the value from the virtual-APIC page;
+- * take those 256 bits directly from the L1 bitmap.
+- */
+- for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
+- unsigned word = msr / BITS_PER_LONG;
+- msr_bitmap_l0[word] = msr_bitmap_l1[word];
+- msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
+- }
+- } else {
+- for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
+- unsigned word = msr / BITS_PER_LONG;
+- msr_bitmap_l0[word] = ~0;
+- msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
+- }
+- }
+
+- nested_vmx_disable_intercept_for_msr(
+- msr_bitmap_l1, msr_bitmap_l0,
+- X2APIC_MSR(APIC_TASKPRI),
+- MSR_TYPE_W);
++ /*
++ * To keep the control flow simple, pay eight 8-byte writes (sixteen
++ * 4-byte writes on 32-bit systems) up front to enable intercepts for
++ * the x2APIC MSR range and selectively disable them below.
++ */
++ enable_x2apic_msr_intercepts(msr_bitmap_l0);
++
++ if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
++ if (nested_cpu_has_apic_reg_virt(vmcs12)) {
++ /*
++ * L0 need not intercept reads for MSRs between 0x800
++ * and 0x8ff, it just lets the processor take the value
++ * from the virtual-APIC page; take those 256 bits
++ * directly from the L1 bitmap.
++ */
++ for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
++ unsigned word = msr / BITS_PER_LONG;
++
++ msr_bitmap_l0[word] = msr_bitmap_l1[word];
++ }
++ }
+
+- if (nested_cpu_has_vid(vmcs12)) {
+- nested_vmx_disable_intercept_for_msr(
+- msr_bitmap_l1, msr_bitmap_l0,
+- X2APIC_MSR(APIC_EOI),
+- MSR_TYPE_W);
+ nested_vmx_disable_intercept_for_msr(
+ msr_bitmap_l1, msr_bitmap_l0,
+- X2APIC_MSR(APIC_SELF_IPI),
+- MSR_TYPE_W);
++ X2APIC_MSR(APIC_TASKPRI),
++ MSR_TYPE_R | MSR_TYPE_W);
++
++ if (nested_cpu_has_vid(vmcs12)) {
++ nested_vmx_disable_intercept_for_msr(
++ msr_bitmap_l1, msr_bitmap_l0,
++ X2APIC_MSR(APIC_EOI),
++ MSR_TYPE_W);
++ nested_vmx_disable_intercept_for_msr(
++ msr_bitmap_l1, msr_bitmap_l0,
++ X2APIC_MSR(APIC_SELF_IPI),
++ MSR_TYPE_W);
++ }
+ }
+
+ if (spec_ctrl)
+@@ -12836,11 +12852,15 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
+ nested_cache_shadow_vmcs12(vcpu, vmcs12);
+
+ /*
+- * If we're entering a halted L2 vcpu and the L2 vcpu won't be woken
+- * by event injection, halt vcpu.
++ * If we're entering a halted L2 vcpu and the L2 vcpu won't be
++ * awakened by event injection or by an NMI-window VM-exit or
++ * by an interrupt-window VM-exit, halt the vcpu.
+ */
+ if ((vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) &&
+- !(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK)) {
++ !(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) &&
++ !(vmcs12->cpu_based_vm_exec_control & CPU_BASED_VIRTUAL_NMI_PENDING) &&
++ !((vmcs12->cpu_based_vm_exec_control & CPU_BASED_VIRTUAL_INTR_PENDING) &&
++ (vmcs12->guest_rflags & X86_EFLAGS_IF))) {
+ vmx->nested.nested_run_pending = 0;
+ return kvm_vcpu_halt(vcpu);
+ }
+diff --git a/arch/xtensa/kernel/stacktrace.c b/arch/xtensa/kernel/stacktrace.c
+index 0df4080fa20f..a94da7dd3eae 100644
+--- a/arch/xtensa/kernel/stacktrace.c
++++ b/arch/xtensa/kernel/stacktrace.c
+@@ -253,10 +253,14 @@ static int return_address_cb(struct stackframe *frame, void *data)
+ return 1;
+ }
+
++/*
++ * level == 0 is for the return address from the caller of this function,
++ * not from this function itself.
++ */
+ unsigned long return_address(unsigned level)
+ {
+ struct return_addr_data r = {
+- .skip = level + 1,
++ .skip = level,
+ };
+ walk_stackframe(stack_pointer(NULL), return_address_cb, &r);
+ return r.addr;
+diff --git a/block/bio.c b/block/bio.c
+index 55a5386fd431..3d757055305f 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -1240,8 +1240,11 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
+ }
+ }
+
+- if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
++ if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) {
++ if (!map_data)
++ __free_page(page);
+ break;
++ }
+
+ len -= bytes;
+ offset = 0;
+diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
+index 78f9de260d5f..2f4641e5ecde 100644
+--- a/drivers/acpi/acpica/dsopcode.c
++++ b/drivers/acpi/acpica/dsopcode.c
+@@ -523,6 +523,10 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
+ ACPI_FORMAT_UINT64(obj_desc->region.address),
+ obj_desc->region.length));
+
++ status = acpi_ut_add_address_range(obj_desc->region.space_id,
++ obj_desc->region.address,
++ obj_desc->region.length, node);
++
+ /* Now the address and length are valid for this opregion */
+
+ obj_desc->region.flags |= AOPOBJ_DATA_VALID;
+diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
+index e10fec99a182..4424997ecf30 100644
+--- a/drivers/acpi/acpica/evgpe.c
++++ b/drivers/acpi/acpica/evgpe.c
+@@ -81,8 +81,12 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
+
+ ACPI_FUNCTION_TRACE(ev_enable_gpe);
+
+- /* Enable the requested GPE */
++ /* Clear the GPE status */
++ status = acpi_hw_clear_gpe(gpe_event_info);
++ if (ACPI_FAILURE(status))
++ return_ACPI_STATUS(status);
+
++ /* Enable the requested GPE */
+ status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
+ return_ACPI_STATUS(status);
+ }
+diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c
+index 8638f43cfc3d..79d86da1c892 100644
+--- a/drivers/acpi/acpica/nsobject.c
++++ b/drivers/acpi/acpica/nsobject.c
+@@ -186,6 +186,10 @@ void acpi_ns_detach_object(struct acpi_namespace_node *node)
+ }
+ }
+
++ if (obj_desc->common.type == ACPI_TYPE_REGION) {
++ acpi_ut_remove_address_range(obj_desc->region.space_id, node);
++ }
++
+ /* Clear the Node entry in all cases */
+
+ node->object = NULL;
+diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
+index 40728491f37b..1df9cb8e659e 100644
+--- a/drivers/char/Kconfig
++++ b/drivers/char/Kconfig
+@@ -343,7 +343,7 @@ config XILINX_HWICAP
+
+ config R3964
+ tristate "Siemens R3964 line discipline"
+- depends on TTY
++ depends on TTY && BROKEN
+ ---help---
+ This driver allows synchronous communication with devices using the
+ Siemens R3964 packet protocol. Unless you are dealing with special
+diff --git a/drivers/clk/meson/meson-aoclk.c b/drivers/clk/meson/meson-aoclk.c
+index 258c8d259ea1..f965845917e3 100644
+--- a/drivers/clk/meson/meson-aoclk.c
++++ b/drivers/clk/meson/meson-aoclk.c
+@@ -65,20 +65,15 @@ int meson_aoclkc_probe(struct platform_device *pdev)
+ return ret;
+ }
+
+- /* Populate regmap */
+- for (clkid = 0; clkid < data->num_clks; clkid++)
++ /*
++ * Populate regmap and register all clks
++ */
++ for (clkid = 0; clkid < data->num_clks; clkid++) {
+ data->clks[clkid]->map = regmap;
+
+- /* Register all clks */
+- for (clkid = 0; clkid < data->hw_data->num; clkid++) {
+- if (!data->hw_data->hws[clkid])
+- continue;
+-
+ ret = devm_clk_hw_register(dev, data->hw_data->hws[clkid]);
+- if (ret) {
+- dev_err(dev, "Clock registration failed\n");
++ if (ret)
+ return ret;
+- }
+ }
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
+index 00aad8164dec..542f31ce108f 100644
+--- a/drivers/gpu/drm/i915/gvt/gtt.c
++++ b/drivers/gpu/drm/i915/gvt/gtt.c
+@@ -1940,7 +1940,7 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
+ */
+ void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
+ {
+- atomic_dec(&mm->pincount);
++ atomic_dec_if_positive(&mm->pincount);
+ }
+
+ /**
+diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
+index 43aa058e29fc..663a7c9ca3d3 100644
+--- a/drivers/gpu/drm/i915/gvt/scheduler.c
++++ b/drivers/gpu/drm/i915/gvt/scheduler.c
+@@ -1389,8 +1389,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
+ intel_runtime_pm_put(dev_priv);
+ }
+
+- if (ret && (vgpu_is_vm_unhealthy(ret))) {
+- enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
++ if (ret) {
++ if (vgpu_is_vm_unhealthy(ret))
++ enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
+ intel_vgpu_destroy_workload(workload);
+ return ERR_PTR(ret);
+ }
+diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
+index 9ef515df724b..54e767bd5ddb 100644
+--- a/drivers/gpu/drm/udl/udl_drv.c
++++ b/drivers/gpu/drm/udl/udl_drv.c
+@@ -51,6 +51,7 @@ static struct drm_driver driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
+ .load = udl_driver_load,
+ .unload = udl_driver_unload,
++ .release = udl_driver_release,
+
+ /* gem hooks */
+ .gem_free_object_unlocked = udl_gem_free_object,
+diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
+index e9e9b1ff678e..4ae67d882eae 100644
+--- a/drivers/gpu/drm/udl/udl_drv.h
++++ b/drivers/gpu/drm/udl/udl_drv.h
+@@ -104,6 +104,7 @@ void udl_urb_completion(struct urb *urb);
+
+ int udl_driver_load(struct drm_device *dev, unsigned long flags);
+ void udl_driver_unload(struct drm_device *dev);
++void udl_driver_release(struct drm_device *dev);
+
+ int udl_fbdev_init(struct drm_device *dev);
+ void udl_fbdev_cleanup(struct drm_device *dev);
+diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
+index 1b014d92855b..19055dda3140 100644
+--- a/drivers/gpu/drm/udl/udl_main.c
++++ b/drivers/gpu/drm/udl/udl_main.c
+@@ -378,6 +378,12 @@ void udl_driver_unload(struct drm_device *dev)
+ udl_free_urb_list(dev);
+
+ udl_fbdev_cleanup(dev);
+- udl_modeset_cleanup(dev);
+ kfree(udl);
+ }
+
++void udl_driver_release(struct drm_device *dev)
++{
++ udl_modeset_cleanup(dev);
++ drm_dev_fini(dev);
++ kfree(dev);
++}
+diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
+index 81da17a42dc9..c7adaca2ab01 100644
+--- a/drivers/hwmon/Kconfig
++++ b/drivers/hwmon/Kconfig
+@@ -1755,6 +1755,7 @@ config SENSORS_VT8231
+ config SENSORS_W83773G
+ tristate "Nuvoton W83773G"
+ depends on I2C
++ select REGMAP_I2C
+ help
+ If you say yes here you get support for the Nuvoton W83773G hardware
+ monitoring chip.
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index 96d5fb3f6199..bc6ef2303f0b 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -908,7 +908,7 @@ static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsig
+ static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2)
+ {
+ return range1->logical_sector < range2->logical_sector + range2->n_sectors &&
+- range2->logical_sector + range2->n_sectors > range2->logical_sector;
++ range1->logical_sector + range1->n_sectors > range2->logical_sector;
+ }
+
+ static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting)
+@@ -954,8 +954,6 @@ static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity
+ struct dm_integrity_range *last_range =
+ list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
+ struct task_struct *last_range_task;
+- if (!ranges_overlap(range, last_range))
+- break;
+ last_range_task = last_range->task;
+ list_del(&last_range->wait_entry);
+ if (!add_new_range(ic, last_range, false)) {
+@@ -3174,7 +3172,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ journal_watermark = val;
+ else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
+ sync_msec = val;
+- else if (!memcmp(opt_string, "meta_device:", strlen("meta_device:"))) {
++ else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) {
+ if (ic->meta_dev) {
+ dm_put_device(ti, ic->meta_dev);
+ ic->meta_dev = NULL;
+@@ -3193,17 +3191,17 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ goto bad;
+ }
+ ic->sectors_per_block = val >> SECTOR_SHIFT;
+- } else if (!memcmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
++ } else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
+ r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
+ "Invalid internal_hash argument");
+ if (r)
+ goto bad;
+- } else if (!memcmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
++ } else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
+ r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
+ "Invalid journal_crypt argument");
+ if (r)
+ goto bad;
+- } else if (!memcmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
++ } else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
+ r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
+ "Invalid journal_mac argument");
+ if (r)
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index 3d0e2c198f06..c7fe4789c40e 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -1872,6 +1872,36 @@ static bool dm_table_supports_secure_erase(struct dm_table *t)
+ return true;
+ }
+
++static int device_requires_stable_pages(struct dm_target *ti,
++ struct dm_dev *dev, sector_t start,
++ sector_t len, void *data)
++{
++ struct request_queue *q = bdev_get_queue(dev->bdev);
++
++ return q && bdi_cap_stable_pages_required(q->backing_dev_info);
++}
++
++/*
++ * If any underlying device requires stable pages, a table must require
++ * them as well. Only targets that support iterate_devices are considered:
++ * don't want error, zero, etc to require stable pages.
++ */
++static bool dm_table_requires_stable_pages(struct dm_table *t)
++{
++ struct dm_target *ti;
++ unsigned i;
++
++ for (i = 0; i < dm_table_get_num_targets(t); i++) {
++ ti = dm_table_get_target(t, i);
++
++ if (ti->type->iterate_devices &&
++ ti->type->iterate_devices(ti, device_requires_stable_pages, NULL))
++ return true;
++ }
++
++ return false;
++}
++
+ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
+ struct queue_limits *limits)
+ {
+@@ -1929,6 +1959,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
+
+ dm_table_verify_integrity(t);
+
++ /*
++ * Some devices don't use blk_integrity but still want stable pages
++ * because they do their own checksumming.
++ */
++ if (dm_table_requires_stable_pages(t))
++ q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
++ else
++ q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
++
+ /*
+ * Determine whether or not this queue's I/O timings contribute
+ * to the entropy pool, Only request-based targets use this.
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 07d2949a8746..42768fe92b41 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1007,15 +1007,7 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
+ return -EINVAL;
+ }
+
+- /*
+- * BIO based queue uses its own splitting. When multipage bvecs
+- * is switched on, size of the incoming bio may be too big to
+- * be handled in some targets, such as crypt.
+- *
+- * When these targets are ready for the big bio, we can remove
+- * the limit.
+- */
+- ti->max_io_len = min_t(uint32_t, len, BIO_MAX_PAGES * PAGE_SIZE);
++ ti->max_io_len = (uint32_t) len;
+
+ return 0;
+ }
1529 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1530 +index 0bd93bb7d1a2..581ad0a17d0c 100644
1531 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1532 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1533 +@@ -1092,6 +1092,8 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1534 + tpa_info = &rxr->rx_tpa[agg_id];
1535 +
1536 + if (unlikely(cons != rxr->rx_next_cons)) {
1537 ++ netdev_warn(bp->dev, "TPA cons %x != expected cons %x\n",
1538 ++ cons, rxr->rx_next_cons);
1539 + bnxt_sched_reset(bp, rxr);
1540 + return;
1541 + }
1542 +@@ -1544,15 +1546,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
1543 + }
1544 +
1545 + cons = rxcmp->rx_cmp_opaque;
1546 +- rx_buf = &rxr->rx_buf_ring[cons];
1547 +- data = rx_buf->data;
1548 +- data_ptr = rx_buf->data_ptr;
1549 + if (unlikely(cons != rxr->rx_next_cons)) {
1550 + int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);
1551 +
1552 ++ netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
1553 ++ cons, rxr->rx_next_cons);
1554 + bnxt_sched_reset(bp, rxr);
1555 + return rc1;
1556 + }
1557 ++ rx_buf = &rxr->rx_buf_ring[cons];
1558 ++ data = rx_buf->data;
1559 ++ data_ptr = rx_buf->data_ptr;
1560 + prefetch(data_ptr);
1561 +
1562 + misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1563 +@@ -1569,11 +1573,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
1564 +
1565 + rx_buf->data = NULL;
1566 + if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
1567 ++ u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
1568 ++
1569 + bnxt_reuse_rx_data(rxr, cons, data);
1570 + if (agg_bufs)
1571 + bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
1572 +
1573 + rc = -EIO;
1574 ++ if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
1575 ++ netdev_warn(bp->dev, "RX buffer error %x\n", rx_err);
1576 ++ bnxt_sched_reset(bp, rxr);
1577 ++ }
1578 + goto next_rx;
1579 + }
1580 +
1581 +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
1582 +index c8704b1690eb..a475f36ddf8c 100644
1583 +--- a/drivers/net/ethernet/ibm/ibmvnic.c
1584 ++++ b/drivers/net/ethernet/ibm/ibmvnic.c
1585 +@@ -1888,6 +1888,7 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
1586 + */
1587 + adapter->state = VNIC_PROBED;
1588 +
1589 ++ reinit_completion(&adapter->init_done);
1590 + rc = init_crq_queue(adapter);
1591 + if (rc) {
1592 + netdev_err(adapter->netdev,
1593 +@@ -4569,7 +4570,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
1594 + old_num_rx_queues = adapter->req_rx_queues;
1595 + old_num_tx_queues = adapter->req_tx_queues;
1596 +
1597 +- init_completion(&adapter->init_done);
1598 ++ reinit_completion(&adapter->init_done);
1599 + adapter->init_done_rc = 0;
1600 + ibmvnic_send_crq_init(adapter);
1601 + if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
1602 +@@ -4624,7 +4625,6 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
1603 +
1604 + adapter->from_passive_init = false;
1605 +
1606 +- init_completion(&adapter->init_done);
1607 + adapter->init_done_rc = 0;
1608 + ibmvnic_send_crq_init(adapter);
1609 + if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
1610 +@@ -4703,6 +4703,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
1611 + INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
1612 + INIT_LIST_HEAD(&adapter->rwi_list);
1613 + spin_lock_init(&adapter->rwi_lock);
1614 ++ init_completion(&adapter->init_done);
1615 + adapter->resetting = false;
1616 +
1617 + adapter->mac_change_pending = false;
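The ibmvnic hunks above move the one-shot init_completion() into probe and convert the reset paths to reinit_completion(), so a reset can no longer re-construct a completion that a waiter may already be blocked on. A hedged userspace model of that init-once/re-arm-many discipline, built on a flag plus condition variable (the kernel's struct completion is wait-queue based; these helper names are illustrative):

#include <pthread.h>
#include <stdbool.h>

struct completion {
    pthread_mutex_t lock;
    pthread_cond_t  cond;
    bool            done;
};

/* One-time constructor: call exactly once, e.g. at probe time. */
void completion_init(struct completion *c)
{
    pthread_mutex_init(&c->lock, NULL);
    pthread_cond_init(&c->cond, NULL);
    c->done = false;
}

/* Re-arm for another wait cycle without touching lock/cond state. */
void completion_reinit(struct completion *c)
{
    pthread_mutex_lock(&c->lock);
    c->done = false;
    pthread_mutex_unlock(&c->lock);
}

void completion_complete(struct completion *c)
{
    pthread_mutex_lock(&c->lock);
    c->done = true;
    pthread_cond_broadcast(&c->cond);
    pthread_mutex_unlock(&c->lock);
}

void completion_wait(struct completion *c)
{
    pthread_mutex_lock(&c->lock);
    while (!c->done)
        pthread_cond_wait(&c->cond, &c->lock);
    pthread_mutex_unlock(&c->lock);
}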
1618 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
1619 +index eac245a93f91..4ab0d030b544 100644
1620 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
1621 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
1622 +@@ -122,7 +122,9 @@ out:
1623 + return err;
1624 + }
1625 +
1626 +-/* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B]) */
1627 ++/* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B])
1628 ++ * minimum speed value is 40Gbps
1629 ++ */
1630 + static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
1631 + {
1632 + u32 speed;
1633 +@@ -130,10 +132,9 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
1634 + int err;
1635 +
1636 + err = mlx5e_port_linkspeed(priv->mdev, &speed);
1637 +- if (err) {
1638 +- mlx5_core_warn(priv->mdev, "cannot get port speed\n");
1639 +- return 0;
1640 +- }
1641 ++ if (err)
1642 ++ speed = SPEED_40000;
1643 ++ speed = max_t(u32, speed, SPEED_40000);
1644 +
1645 + xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100;
1646 +
1647 +@@ -142,7 +143,7 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
1648 + }
1649 +
1650 + static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
1651 +- u32 xoff, unsigned int mtu)
1652 ++ u32 xoff, unsigned int max_mtu)
1653 + {
1654 + int i;
1655 +
1656 +@@ -154,11 +155,12 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
1657 + }
1658 +
1659 + if (port_buffer->buffer[i].size <
1660 +- (xoff + mtu + (1 << MLX5E_BUFFER_CELL_SHIFT)))
1661 ++ (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT)))
1662 + return -ENOMEM;
1663 +
1664 + port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff;
1665 +- port_buffer->buffer[i].xon = port_buffer->buffer[i].xoff - mtu;
1666 ++ port_buffer->buffer[i].xon =
1667 ++ port_buffer->buffer[i].xoff - max_mtu;
1668 + }
1669 +
1670 + return 0;
1671 +@@ -166,7 +168,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
1672 +
1673 + /**
1674 + * update_buffer_lossy()
1675 +- * mtu: device's MTU
1676 ++ * max_mtu: netdev's max_mtu
1677 + * pfc_en: <input> current pfc configuration
1678 + * buffer: <input> current prio to buffer mapping
1679 + * xoff: <input> xoff value
1680 +@@ -183,7 +185,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
1681 + * Return 0 if no error.
1682 + * Set change to true if buffer configuration is modified.
1683 + */
1684 +-static int update_buffer_lossy(unsigned int mtu,
1685 ++static int update_buffer_lossy(unsigned int max_mtu,
1686 + u8 pfc_en, u8 *buffer, u32 xoff,
1687 + struct mlx5e_port_buffer *port_buffer,
1688 + bool *change)
1689 +@@ -220,7 +222,7 @@ static int update_buffer_lossy(unsigned int mtu,
1690 + }
1691 +
1692 + if (changed) {
1693 +- err = update_xoff_threshold(port_buffer, xoff, mtu);
1694 ++ err = update_xoff_threshold(port_buffer, xoff, max_mtu);
1695 + if (err)
1696 + return err;
1697 +
1698 +@@ -230,6 +232,7 @@ static int update_buffer_lossy(unsigned int mtu,
1699 + return 0;
1700 + }
1701 +
1702 ++#define MINIMUM_MAX_MTU 9216
1703 + int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
1704 + u32 change, unsigned int mtu,
1705 + struct ieee_pfc *pfc,
1706 +@@ -241,12 +244,14 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
1707 + bool update_prio2buffer = false;
1708 + u8 buffer[MLX5E_MAX_PRIORITY];
1709 + bool update_buffer = false;
1710 ++ unsigned int max_mtu;
1711 + u32 total_used = 0;
1712 + u8 curr_pfc_en;
1713 + int err;
1714 + int i;
1715 +
1716 + mlx5e_dbg(HW, priv, "%s: change=%x\n", __func__, change);
1717 ++ max_mtu = max_t(unsigned int, priv->netdev->max_mtu, MINIMUM_MAX_MTU);
1718 +
1719 + err = mlx5e_port_query_buffer(priv, &port_buffer);
1720 + if (err)
1721 +@@ -254,7 +259,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
1722 +
1723 + if (change & MLX5E_PORT_BUFFER_CABLE_LEN) {
1724 + update_buffer = true;
1725 +- err = update_xoff_threshold(&port_buffer, xoff, mtu);
1726 ++ err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
1727 + if (err)
1728 + return err;
1729 + }
1730 +@@ -264,7 +269,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
1731 + if (err)
1732 + return err;
1733 +
1734 +- err = update_buffer_lossy(mtu, pfc->pfc_en, buffer, xoff,
1735 ++ err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff,
1736 + &port_buffer, &update_buffer);
1737 + if (err)
1738 + return err;
1739 +@@ -276,8 +281,8 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
1740 + if (err)
1741 + return err;
1742 +
1743 +- err = update_buffer_lossy(mtu, curr_pfc_en, prio2buffer, xoff,
1744 +- &port_buffer, &update_buffer);
1745 ++ err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer,
1746 ++ xoff, &port_buffer, &update_buffer);
1747 + if (err)
1748 + return err;
1749 + }
1750 +@@ -301,7 +306,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
1751 + return -EINVAL;
1752 +
1753 + update_buffer = true;
1754 +- err = update_xoff_threshold(&port_buffer, xoff, mtu);
1755 ++ err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
1756 + if (err)
1757 + return err;
1758 + }
1759 +@@ -309,7 +314,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
1760 + /* Need to update buffer configuration if xoff value is changed */
1761 + if (!update_buffer && xoff != priv->dcbx.xoff) {
1762 + update_buffer = true;
1763 +- err = update_xoff_threshold(&port_buffer, xoff, mtu);
1764 ++ err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
1765 + if (err)
1766 + return err;
1767 + }
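The calculate_xoff() change above stops returning 0 when the link-speed query fails and instead floors the speed at 40 Gbps, so the xoff threshold can never collapse to nothing. A self-contained sketch of the same integer arithmetic (constants copied from the hunk; the cable length and MTU in main() are made-up inputs):

#include <stdio.h>

#define SPEED_40000 40000 /* Mbps */

/* xoff = (301 + 2.16 * len[m]) * speed[Gbps] + 2.72 * MTU[B],
 * in the same fixed-point scaling the driver uses. */
static unsigned int calc_xoff(unsigned int speed_mbps,
                              unsigned int cable_len_m, unsigned int mtu)
{
    unsigned int speed = speed_mbps > SPEED_40000 ? speed_mbps : SPEED_40000;

    return (301 + 216 * cable_len_m / 100) * speed / 1000 + 272 * mtu / 100;
}

int main(void)
{
    /* A failed speed query now behaves like a 40G link, not a 0-speed one. */
    printf("xoff @ speed 0 (clamped): %u\n", calc_xoff(0, 7, 9216));
    printf("xoff @ 100G:              %u\n", calc_xoff(100000, 7, 9216));
    return 0;
}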
1768 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
1769 +index db3278cc052b..124e4567a4ee 100644
1770 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
1771 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
1772 +@@ -45,7 +45,9 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
1773 + if (err)
1774 + return err;
1775 +
1776 ++ mutex_lock(&mdev->mlx5e_res.td.list_lock);
1777 + list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list);
1778 ++ mutex_unlock(&mdev->mlx5e_res.td.list_lock);
1779 +
1780 + return 0;
1781 + }
1782 +@@ -53,8 +55,10 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
1783 + void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
1784 + struct mlx5e_tir *tir)
1785 + {
1786 ++ mutex_lock(&mdev->mlx5e_res.td.list_lock);
1787 + mlx5_core_destroy_tir(mdev, tir->tirn);
1788 + list_del(&tir->list);
1789 ++ mutex_unlock(&mdev->mlx5e_res.td.list_lock);
1790 + }
1791 +
1792 + static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
1793 +@@ -114,6 +118,7 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
1794 + }
1795 +
1796 + INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list);
1797 ++ mutex_init(&mdev->mlx5e_res.td.list_lock);
1798 +
1799 + return 0;
1800 +
1801 +@@ -141,15 +146,17 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
1802 + {
1803 + struct mlx5_core_dev *mdev = priv->mdev;
1804 + struct mlx5e_tir *tir;
1805 +- int err = -ENOMEM;
1806 ++ int err = 0;
1807 + u32 tirn = 0;
1808 + int inlen;
1809 + void *in;
1810 +
1811 + inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
1812 + in = kvzalloc(inlen, GFP_KERNEL);
1813 +- if (!in)
1814 ++ if (!in) {
1815 ++ err = -ENOMEM;
1816 + goto out;
1817 ++ }
1818 +
1819 + if (enable_uc_lb)
1820 + MLX5_SET(modify_tir_in, in, ctx.self_lb_block,
1821 +@@ -157,6 +164,7 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
1822 +
1823 + MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
1824 +
1825 ++ mutex_lock(&mdev->mlx5e_res.td.list_lock);
1826 + list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
1827 + tirn = tir->tirn;
1828 + err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
1829 +@@ -168,6 +176,7 @@ out:
1830 + kvfree(in);
1831 + if (err)
1832 + netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err);
1833 ++ mutex_unlock(&mdev->mlx5e_res.td.list_lock);
1834 +
1835 + return err;
1836 + }
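The en_common hunks serialize all tirs_list producers and the refresh-time walker behind the new list_lock, so a TIR cannot be unlinked while mlx5e_refresh_tirs() iterates. A compact pthread model of that single-lock list discipline (names illustrative):

#include <pthread.h>
#include <stdio.h>

struct node { struct node *next; int id; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

static void list_add_locked(struct node *n)
{
    pthread_mutex_lock(&list_lock);
    n->next = head;
    head = n;
    pthread_mutex_unlock(&list_lock);
}

static void list_del_locked(struct node *n)
{
    pthread_mutex_lock(&list_lock);
    for (struct node **p = &head; *p; p = &(*p)->next) {
        if (*p == n) {
            *p = n->next;
            break;
        }
    }
    pthread_mutex_unlock(&list_lock);
}

/* Walkers take the same lock, so nodes cannot vanish mid-iteration. */
static int list_visit_locked(int (*fn)(struct node *))
{
    int err = 0;

    pthread_mutex_lock(&list_lock);
    for (struct node *n = head; n && !err; n = n->next)
        err = fn(n);
    pthread_mutex_unlock(&list_lock);
    return err;
}

static int print_id(struct node *n) { return printf("id %d\n", n->id) < 0; }

int main(void)
{
    struct node a = { .id = 1 }, b = { .id = 2 };

    list_add_locked(&a);
    list_add_locked(&b);
    list_visit_locked(print_id);
    list_del_locked(&a);
    return 0;
}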
1837 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
1838 +index 5cf5f2a9d51f..8de64e88c670 100644
1839 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
1840 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
1841 +@@ -217,15 +217,21 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
1842 + void *cmd;
1843 + int ret;
1844 +
1845 ++ rcu_read_lock();
1846 ++ flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
1847 ++ rcu_read_unlock();
1848 ++
1849 ++ if (!flow) {
1850 ++ WARN_ONCE(1, "Received NULL pointer for handle\n");
1851 ++ return -EINVAL;
1852 ++ }
1853 ++
1854 + buf = kzalloc(size, GFP_ATOMIC);
1855 + if (!buf)
1856 + return -ENOMEM;
1857 +
1858 + cmd = (buf + 1);
1859 +
1860 +- rcu_read_lock();
1861 +- flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
1862 +- rcu_read_unlock();
1863 + mlx5_fpga_tls_flow_to_cmd(flow, cmd);
1864 +
1865 + MLX5_SET(tls_cmd, cmd, swid, ntohl(handle));
1866 +@@ -238,6 +244,8 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
1867 + buf->complete = mlx_tls_kfree_complete;
1868 +
1869 + ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf);
1870 ++ if (ret < 0)
1871 ++ kfree(buf);
1872 +
1873 + return ret;
1874 + }
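The FPGA TLS hunk makes two independent fixes: the idr lookup is hoisted ahead of the allocation so a missing flow fails before any buffer exists, and a failed mlx5_fpga_sbu_conn_sendmsg() now frees the buffer it would otherwise leak (on success, ownership passes to the completion path). A sketch of that error-path ordering, with lookup_flow() and send_buf() as made-up stand-ins:

#include <errno.h>
#include <stdlib.h>

struct flow { int id; };
struct buf  { char payload[64]; };

static struct flow known_flow = { 42 };

/* Illustrative stand-ins for idr_find() and the async send. */
static struct flow *lookup_flow(unsigned int handle)
{
    return handle == 42 ? &known_flow : NULL;
}

static int send_buf(struct buf *b)
{
    (void)b;
    return -EIO;    /* pretend the send failed */
}

static int resync_rx(unsigned int handle)
{
    struct flow *flow = lookup_flow(handle);    /* validate first... */
    struct buf *b;
    int ret;

    if (!flow)
        return -EINVAL;     /* ...nothing allocated yet, nothing to undo */

    b = calloc(1, sizeof(*b));  /* ...then allocate */
    if (!b)
        return -ENOMEM;

    ret = send_buf(b);
    if (ret < 0)
        free(b);            /* don't leak the buffer on a failed send */
    return ret;
}

int main(void)
{
    return (resync_rx(42) == -EIO && resync_rx(7) == -EINVAL) ? 0 : 1;
}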
1875 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
1876 +index 563ce3fedab4..0e820cf92f8a 100644
1877 +--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
1878 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
1879 +@@ -162,26 +162,6 @@ static struct mlx5_profile profile[] = {
1880 + .size = 8,
1881 + .limit = 4
1882 + },
1883 +- .mr_cache[16] = {
1884 +- .size = 8,
1885 +- .limit = 4
1886 +- },
1887 +- .mr_cache[17] = {
1888 +- .size = 8,
1889 +- .limit = 4
1890 +- },
1891 +- .mr_cache[18] = {
1892 +- .size = 8,
1893 +- .limit = 4
1894 +- },
1895 +- .mr_cache[19] = {
1896 +- .size = 4,
1897 +- .limit = 2
1898 +- },
1899 +- .mr_cache[20] = {
1900 +- .size = 4,
1901 +- .limit = 2
1902 +- },
1903 + },
1904 + };
1905 +
1906 +diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
1907 +index 18a09cdcd9c6..aa5869eb2e3f 100644
1908 +--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
1909 ++++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
1910 +@@ -225,7 +225,7 @@ static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev)
1911 + ret = dev_queue_xmit(skb);
1912 + nfp_repr_inc_tx_stats(netdev, len, ret);
1913 +
1914 +- return ret;
1915 ++ return NETDEV_TX_OK;
1916 + }
1917 +
1918 + static int nfp_repr_stop(struct net_device *netdev)
1919 +@@ -329,6 +329,8 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
1920 +
1921 + SWITCHDEV_SET_OPS(netdev, &nfp_port_switchdev_ops);
1922 +
1923 ++ netdev->priv_flags |= IFF_DISABLE_NETPOLL;
1924 ++
1925 + if (nfp_app_has_tc(app)) {
1926 + netdev->features |= NETIF_F_HW_TC;
1927 + netdev->hw_features |= NETIF_F_HW_TC;
1928 +diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
1929 +index 5f45ffeeecb4..7a50b911b180 100644
1930 +--- a/drivers/net/ethernet/realtek/r8169.c
1931 ++++ b/drivers/net/ethernet/realtek/r8169.c
1932 +@@ -28,6 +28,7 @@
1933 + #include <linux/pm_runtime.h>
1934 + #include <linux/firmware.h>
1935 + #include <linux/prefetch.h>
1936 ++#include <linux/pci-aspm.h>
1937 + #include <linux/ipv6.h>
1938 + #include <net/ip6_checksum.h>
1939 +
1940 +@@ -5417,7 +5418,7 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp)
1941 + tp->cp_cmd |= PktCntrDisable | INTT_1;
1942 + RTL_W16(tp, CPlusCmd, tp->cp_cmd);
1943 +
1944 +- RTL_W16(tp, IntrMitigate, 0x5151);
1945 ++ RTL_W16(tp, IntrMitigate, 0x5100);
1946 +
1947 + /* Work around for RxFIFO overflow. */
1948 + if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
1949 +@@ -7324,6 +7325,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1950 + return rc;
1951 + }
1952 +
1953 ++ /* Disable ASPM completely as that cause random device stop working
1954 ++ * problems as well as full system hangs for some PCIe devices users.
1955 ++ */
1956 ++ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
1957 ++
1958 + /* enable device (incl. PCI PM wakeup and hotplug setup) */
1959 + rc = pcim_enable_device(pdev);
1960 + if (rc < 0) {
1961 +diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
1962 +index 42d284669b03..31d8d83c25ac 100644
1963 +--- a/drivers/net/hyperv/hyperv_net.h
1964 ++++ b/drivers/net/hyperv/hyperv_net.h
1965 +@@ -970,6 +970,7 @@ struct netvsc_device {
1966 +
1967 + wait_queue_head_t wait_drain;
1968 + bool destroy;
1969 ++ bool tx_disable; /* if true, do not wake up queue again */
1970 +
1971 + /* Receive buffer allocated by us but manages by NetVSP */
1972 + void *recv_buf;
1973 +diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
1974 +index 1a942feab954..fb12b63439c6 100644
1975 +--- a/drivers/net/hyperv/netvsc.c
1976 ++++ b/drivers/net/hyperv/netvsc.c
1977 +@@ -110,6 +110,7 @@ static struct netvsc_device *alloc_net_device(void)
1978 +
1979 + init_waitqueue_head(&net_device->wait_drain);
1980 + net_device->destroy = false;
1981 ++ net_device->tx_disable = false;
1982 +
1983 + net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
1984 + net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
1985 +@@ -716,7 +717,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
1986 + } else {
1987 + struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
1988 +
1989 +- if (netif_tx_queue_stopped(txq) &&
1990 ++ if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
1991 + (hv_get_avail_to_write_percent(&channel->outbound) >
1992 + RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
1993 + netif_tx_wake_queue(txq);
1994 +@@ -871,7 +872,8 @@ static inline int netvsc_send_pkt(
1995 + } else if (ret == -EAGAIN) {
1996 + netif_tx_stop_queue(txq);
1997 + ndev_ctx->eth_stats.stop_queue++;
1998 +- if (atomic_read(&nvchan->queue_sends) < 1) {
1999 ++ if (atomic_read(&nvchan->queue_sends) < 1 &&
2000 ++ !net_device->tx_disable) {
2001 + netif_tx_wake_queue(txq);
2002 + ndev_ctx->eth_stats.wake_queue++;
2003 + ret = -ENOSPC;
2004 +diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
2005 +index c8320405c8f1..9d699bd5f715 100644
2006 +--- a/drivers/net/hyperv/netvsc_drv.c
2007 ++++ b/drivers/net/hyperv/netvsc_drv.c
2008 +@@ -109,6 +109,15 @@ static void netvsc_set_rx_mode(struct net_device *net)
2009 + rcu_read_unlock();
2010 + }
2011 +
2012 ++static void netvsc_tx_enable(struct netvsc_device *nvscdev,
2013 ++ struct net_device *ndev)
2014 ++{
2015 ++ nvscdev->tx_disable = false;
2016 ++ virt_wmb(); /* ensure queue wake up mechanism is on */
2017 ++
2018 ++ netif_tx_wake_all_queues(ndev);
2019 ++}
2020 ++
2021 + static int netvsc_open(struct net_device *net)
2022 + {
2023 + struct net_device_context *ndev_ctx = netdev_priv(net);
2024 +@@ -129,7 +138,7 @@ static int netvsc_open(struct net_device *net)
2025 + rdev = nvdev->extension;
2026 + if (!rdev->link_state) {
2027 + netif_carrier_on(net);
2028 +- netif_tx_wake_all_queues(net);
2029 ++ netvsc_tx_enable(nvdev, net);
2030 + }
2031 +
2032 + if (vf_netdev) {
2033 +@@ -184,6 +193,17 @@ static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
2034 + }
2035 + }
2036 +
2037 ++static void netvsc_tx_disable(struct netvsc_device *nvscdev,
2038 ++ struct net_device *ndev)
2039 ++{
2040 ++ if (nvscdev) {
2041 ++ nvscdev->tx_disable = true;
2042 ++ virt_wmb(); /* ensure txq will not wake up after stop */
2043 ++ }
2044 ++
2045 ++ netif_tx_disable(ndev);
2046 ++}
2047 ++
2048 + static int netvsc_close(struct net_device *net)
2049 + {
2050 + struct net_device_context *net_device_ctx = netdev_priv(net);
2051 +@@ -192,7 +212,7 @@ static int netvsc_close(struct net_device *net)
2052 + struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
2053 + int ret;
2054 +
2055 +- netif_tx_disable(net);
2056 ++ netvsc_tx_disable(nvdev, net);
2057 +
2058 + /* No need to close rndis filter if it is removed already */
2059 + if (!nvdev)
2060 +@@ -918,7 +938,7 @@ static int netvsc_detach(struct net_device *ndev,
2061 +
2062 + /* If device was up (receiving) then shutdown */
2063 + if (netif_running(ndev)) {
2064 +- netif_tx_disable(ndev);
2065 ++ netvsc_tx_disable(nvdev, ndev);
2066 +
2067 + ret = rndis_filter_close(nvdev);
2068 + if (ret) {
2069 +@@ -1899,7 +1919,7 @@ static void netvsc_link_change(struct work_struct *w)
2070 + if (rdev->link_state) {
2071 + rdev->link_state = false;
2072 + netif_carrier_on(net);
2073 +- netif_tx_wake_all_queues(net);
2074 ++ netvsc_tx_enable(net_device, net);
2075 + } else {
2076 + notify = true;
2077 + }
2078 +@@ -1909,7 +1929,7 @@ static void netvsc_link_change(struct work_struct *w)
2079 + if (!rdev->link_state) {
2080 + rdev->link_state = true;
2081 + netif_carrier_off(net);
2082 +- netif_tx_stop_all_queues(net);
2083 ++ netvsc_tx_disable(net_device, net);
2084 + }
2085 + kfree(event);
2086 + break;
2087 +@@ -1918,7 +1938,7 @@ static void netvsc_link_change(struct work_struct *w)
2088 + if (!rdev->link_state) {
2089 + rdev->link_state = true;
2090 + netif_carrier_off(net);
2091 +- netif_tx_stop_all_queues(net);
2092 ++ netvsc_tx_disable(net_device, net);
2093 + event->event = RNDIS_STATUS_MEDIA_CONNECT;
2094 + spin_lock_irqsave(&ndev_ctx->lock, flags);
2095 + list_add(&event->list, &ndev_ctx->reconfig_events);
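The netvsc hunks pair every queue stop with a tx_disable flag published ahead of it (via virt_wmb()), and every wake path re-checks the flag, closing the race where a late completion wakes a queue that a link-change event just stopped. A C11-atomics model of the publish/check ordering (queue_stop()/queue_wake() are placeholders for the netif_tx_* calls):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool tx_disable;

static void queue_stop(void) { /* netif_tx_disable() */ }
static void queue_wake(void) { /* netif_tx_wake_queue() */ }

static void tx_disable_set(void)
{
    /* Release store: the flag is visible before the queue stops,
     * a userspace analogue of tx_disable = true; virt_wmb(). */
    atomic_store_explicit(&tx_disable, true, memory_order_release);
    queue_stop();
}

static void tx_enable_set(void)
{
    atomic_store_explicit(&tx_disable, false, memory_order_release);
    queue_wake();
}

/* Completion path: only wake if transmit is still allowed. */
static void on_send_complete(void)
{
    if (!atomic_load_explicit(&tx_disable, memory_order_acquire))
        queue_wake();
}

int main(void)
{
    tx_disable_set();    /* link down: latch the flag, stop the queue */
    on_send_complete();  /* late completion: must NOT wake the queue  */
    tx_enable_set();     /* link up: clear the flag, then wake        */
    return 0;
}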
2096 +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
2097 +index 74bebbdb4b15..9195f3476b1d 100644
2098 +--- a/drivers/net/usb/qmi_wwan.c
2099 ++++ b/drivers/net/usb/qmi_wwan.c
2100 +@@ -1203,6 +1203,7 @@ static const struct usb_device_id products[] = {
2101 + {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
2102 + {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
2103 + {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
2104 ++ {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */
2105 + {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
2106 + {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
2107 + {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
2108 +diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
2109 +index da7c72372ffc..9c397fa8704c 100644
2110 +--- a/drivers/pci/hotplug/pciehp_ctrl.c
2111 ++++ b/drivers/pci/hotplug/pciehp_ctrl.c
2112 +@@ -117,6 +117,10 @@ static void remove_board(struct slot *p_slot)
2113 + * removed from the slot/adapter.
2114 + */
2115 + msleep(1000);
2116 ++
2117 ++ /* Ignore link or presence changes caused by power off */
2118 ++ atomic_and(~(PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC),
2119 ++ &ctrl->pending_events);
2120 + }
2121 +
2122 + /* turn off Green LED */
2123 +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
2124 +index c0673a717239..37d897bc4cf1 100644
2125 +--- a/drivers/pci/quirks.c
2126 ++++ b/drivers/pci/quirks.c
2127 +@@ -3852,6 +3852,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9128,
2128 + /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */
2129 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130,
2130 + quirk_dma_func1_alias);
2131 ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9170,
2132 ++ quirk_dma_func1_alias);
2133 + /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c47 + c57 */
2134 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9172,
2135 + quirk_dma_func1_alias);
2136 +diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
2137 +index 0840d27381ea..e0a04bfc873e 100644
2138 +--- a/drivers/tty/Kconfig
2139 ++++ b/drivers/tty/Kconfig
2140 +@@ -441,4 +441,28 @@ config VCC
2141 + depends on SUN_LDOMS
2142 + help
2143 + Support for Sun logical domain consoles.
2144 ++
2145 ++config LDISC_AUTOLOAD
2146 ++ bool "Automatically load TTY Line Disciplines"
2147 ++ default y
2148 ++ help
2149 ++ Historically the kernel has always automatically loaded any
2150 ++ line discipline that is in a kernel module when a user asks
2151 ++ for it to be loaded with the TIOCSETD ioctl, or through other
2152 ++ means. This is not always the best thing to do on systems
2153 ++ where you know you will not be using some of the more
2154 ++ "ancient" line disciplines, so prevent the kernel from doing
2155 ++ this unless the request is coming from a process with the
2156 ++ CAP_SYS_MODULE permissions.
2157 ++
2158 ++ Say 'Y' here if you trust your userspace users to do the right
2159 ++ thing, or if you have only provided the line disciplines that
2160 ++ you know you will be using, or if you wish to continue to use
2161 ++ the traditional method of on-demand loading of these modules
2162 ++ by any user.
2163 ++
2164 ++ This functionality can be changed at runtime with the
2165 ++ dev.tty.ldisc_autoload sysctl, this configuration option will
2166 ++ only set the default value of this functionality.
2167 ++
2168 + endif # TTY
2169 +diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
2170 +index e7d192ebecd7..ac8025cd4a1f 100644
2171 +--- a/drivers/tty/tty_io.c
2172 ++++ b/drivers/tty/tty_io.c
2173 +@@ -512,6 +512,8 @@ static const struct file_operations hung_up_tty_fops = {
2174 + static DEFINE_SPINLOCK(redirect_lock);
2175 + static struct file *redirect;
2176 +
2177 ++extern void tty_sysctl_init(void);
2178 ++
2179 + /**
2180 + * tty_wakeup - request more data
2181 + * @tty: terminal
2182 +@@ -3340,6 +3342,7 @@ void console_sysfs_notify(void)
2183 + */
2184 + int __init tty_init(void)
2185 + {
2186 ++ tty_sysctl_init();
2187 + cdev_init(&tty_cdev, &tty_fops);
2188 + if (cdev_add(&tty_cdev, MKDEV(TTYAUX_MAJOR, 0), 1) ||
2189 + register_chrdev_region(MKDEV(TTYAUX_MAJOR, 0), 1, "/dev/tty") < 0)
2190 +diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
2191 +index fc4c97cae01e..53bb6d4e9e8d 100644
2192 +--- a/drivers/tty/tty_ldisc.c
2193 ++++ b/drivers/tty/tty_ldisc.c
2194 +@@ -156,6 +156,13 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
2195 + * takes tty_ldiscs_lock to guard against ldisc races
2196 + */
2197 +
2198 ++#if defined(CONFIG_LDISC_AUTOLOAD)
2199 ++ #define INITIAL_AUTOLOAD_STATE 1
2200 ++#else
2201 ++ #define INITIAL_AUTOLOAD_STATE 0
2202 ++#endif
2203 ++static int tty_ldisc_autoload = INITIAL_AUTOLOAD_STATE;
2204 ++
2205 + static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
2206 + {
2207 + struct tty_ldisc *ld;
2208 +@@ -170,6 +177,8 @@ static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
2209 + */
2210 + ldops = get_ldops(disc);
2211 + if (IS_ERR(ldops)) {
2212 ++ if (!capable(CAP_SYS_MODULE) && !tty_ldisc_autoload)
2213 ++ return ERR_PTR(-EPERM);
2214 + request_module("tty-ldisc-%d", disc);
2215 + ldops = get_ldops(disc);
2216 + if (IS_ERR(ldops))
2217 +@@ -829,3 +838,41 @@ void tty_ldisc_deinit(struct tty_struct *tty)
2218 + tty_ldisc_put(tty->ldisc);
2219 + tty->ldisc = NULL;
2220 + }
2221 ++
2222 ++static int zero;
2223 ++static int one = 1;
2224 ++static struct ctl_table tty_table[] = {
2225 ++ {
2226 ++ .procname = "ldisc_autoload",
2227 ++ .data = &tty_ldisc_autoload,
2228 ++ .maxlen = sizeof(tty_ldisc_autoload),
2229 ++ .mode = 0644,
2230 ++ .proc_handler = proc_dointvec,
2231 ++ .extra1 = &zero,
2232 ++ .extra2 = &one,
2233 ++ },
2234 ++ { }
2235 ++};
2236 ++
2237 ++static struct ctl_table tty_dir_table[] = {
2238 ++ {
2239 ++ .procname = "tty",
2240 ++ .mode = 0555,
2241 ++ .child = tty_table,
2242 ++ },
2243 ++ { }
2244 ++};
2245 ++
2246 ++static struct ctl_table tty_root_table[] = {
2247 ++ {
2248 ++ .procname = "dev",
2249 ++ .mode = 0555,
2250 ++ .child = tty_dir_table,
2251 ++ },
2252 ++ { }
2253 ++};
2254 ++
2255 ++void tty_sysctl_init(void)
2256 ++{
2257 ++ register_sysctl_table(tty_root_table);
2258 ++}
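Functionally, the tty change reduces to one gate: ldisc module autoload proceeds only if the sysctl allows it or the caller holds CAP_SYS_MODULE. A runnable reduction of that check (the capability test and request_module() are stubbed; only the gating logic mirrors the hunk):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int tty_ldisc_autoload = 1;  /* default mirrors CONFIG_LDISC_AUTOLOAD=y */

/* Illustrative stand-ins for capable(CAP_SYS_MODULE) and request_module(). */
static bool caller_has_sys_module;

static int request_ldisc_module(int disc)
{
    printf("loading tty-ldisc-%d\n", disc);
    return 0;
}

static int maybe_autoload(int disc)
{
    if (!caller_has_sys_module && !tty_ldisc_autoload)
        return -EPERM;  /* unprivileged caller with autoload disabled */
    return request_ldisc_module(disc);
}

int main(void)
{
    tty_ldisc_autoload = 0;     /* sysctl dev.tty.ldisc_autoload = 0 */
    return maybe_autoload(2) == -EPERM ? 0 : 1;
}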
2259 +diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
2260 +index 814b395007b2..9529e28e1822 100644
2261 +--- a/drivers/virtio/virtio_ring.c
2262 ++++ b/drivers/virtio/virtio_ring.c
2263 +@@ -1086,6 +1086,8 @@ struct virtqueue *vring_create_virtqueue(
2264 + GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
2265 + if (queue)
2266 + break;
2267 ++ if (!may_reduce_num)
2268 ++ return NULL;
2269 + }
2270 +
2271 + if (!num)
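The virtio_ring fix matters because vring_create_virtqueue()'s loop halves num after each failed allocation; without the added check, a caller that forbade shrinking would silently receive a smaller ring instead of an error. The loop's shape as a standalone sketch (the per-entry size is illustrative):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Try progressively smaller rings; honor may_reduce_num. */
static void *alloc_ring(unsigned int *num, bool may_reduce_num)
{
    for (; *num; *num /= 2) {
        void *queue = calloc(*num, 16 /* bytes per descriptor, illustrative */);

        if (queue)
            return queue;
        if (!may_reduce_num)
            return NULL;    /* caller demanded exactly *num entries */
    }
    return NULL;
}

int main(void)
{
    unsigned int num = 256;
    void *q = alloc_ring(&num, true);

    printf("got ring of %u entries: %s\n", num, q ? "yes" : "no");
    free(q);
    return 0;
}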
2272 +diff --git a/fs/block_dev.c b/fs/block_dev.c
2273 +index cdbb888a8d4a..1c25dae083a8 100644
2274 +--- a/fs/block_dev.c
2275 ++++ b/fs/block_dev.c
2276 +@@ -296,10 +296,10 @@ static void blkdev_bio_end_io(struct bio *bio)
2277 + struct blkdev_dio *dio = bio->bi_private;
2278 + bool should_dirty = dio->should_dirty;
2279 +
2280 +- if (dio->multi_bio && !atomic_dec_and_test(&dio->ref)) {
2281 +- if (bio->bi_status && !dio->bio.bi_status)
2282 +- dio->bio.bi_status = bio->bi_status;
2283 +- } else {
2284 ++ if (bio->bi_status && !dio->bio.bi_status)
2285 ++ dio->bio.bi_status = bio->bi_status;
2286 ++
2287 ++ if (!dio->multi_bio || atomic_dec_and_test(&dio->ref)) {
2288 + if (!dio->is_sync) {
2289 + struct kiocb *iocb = dio->iocb;
2290 + ssize_t ret;
2291 +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
2292 +index 8bf9cce11213..0eb333c62fe4 100644
2293 +--- a/fs/btrfs/ioctl.c
2294 ++++ b/fs/btrfs/ioctl.c
2295 +@@ -496,6 +496,16 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
2296 + if (!capable(CAP_SYS_ADMIN))
2297 + return -EPERM;
2298 +
2299 ++ /*
2300 ++ * If the fs is mounted with nologreplay, which requires it to be
2301 ++ * mounted in RO mode as well, we can not allow discard on free space
2302 ++ * inside block groups, because log trees refer to extents that are not
2303 ++ * pinned in a block group's free space cache (pinning the extents is
2304 ++ * precisely the first phase of replaying a log tree).
2305 ++ */
2306 ++ if (btrfs_test_opt(fs_info, NOLOGREPLAY))
2307 ++ return -EROFS;
2308 ++
2309 + rcu_read_lock();
2310 + list_for_each_entry_rcu(device, &fs_info->fs_devices->devices,
2311 + dev_list) {
2312 +diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c
2313 +index dc6140013ae8..61d22a56c0ba 100644
2314 +--- a/fs/btrfs/props.c
2315 ++++ b/fs/btrfs/props.c
2316 +@@ -366,11 +366,11 @@ int btrfs_subvol_inherit_props(struct btrfs_trans_handle *trans,
2317 +
2318 + static int prop_compression_validate(const char *value, size_t len)
2319 + {
2320 +- if (!strncmp("lzo", value, len))
2321 ++ if (!strncmp("lzo", value, 3))
2322 + return 0;
2323 +- else if (!strncmp("zlib", value, len))
2324 ++ else if (!strncmp("zlib", value, 4))
2325 + return 0;
2326 +- else if (!strncmp("zstd", value, len))
2327 ++ else if (!strncmp("zstd", value, 4))
2328 + return 0;
2329 +
2330 + return -EINVAL;
2331 +@@ -396,7 +396,7 @@ static int prop_compression_apply(struct inode *inode,
2332 + btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
2333 + } else if (!strncmp("zlib", value, 4)) {
2334 + type = BTRFS_COMPRESS_ZLIB;
2335 +- } else if (!strncmp("zstd", value, len)) {
2336 ++ } else if (!strncmp("zstd", value, 4)) {
2337 + type = BTRFS_COMPRESS_ZSTD;
2338 + btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD);
2339 + } else {
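The btrfs property fix above is a classic strncmp() pitfall: comparing a literal against a caller-supplied length means a short prefix such as "lz" compares equal to "lzo". A small demo of the misbehavior and the fixed form:

#include <stdio.h>
#include <string.h>

int main(void)
{
    const char *value = "lz";   /* truncated user input */
    size_t len = strlen(value);

    /* Buggy: only 2 bytes compared, so "lz" "matches" "lzo". */
    printf("buggy:  %d\n", strncmp("lzo", value, len) == 0);

    /* Fixed: always compare the full literal length. */
    printf("fixed:  %d\n", strncmp("lzo", value, 3) == 0);
    return 0;
}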
2340 +diff --git a/include/linux/bitrev.h b/include/linux/bitrev.h
2341 +index 50fb0dee23e8..d35b8ec1c485 100644
2342 +--- a/include/linux/bitrev.h
2343 ++++ b/include/linux/bitrev.h
2344 +@@ -34,41 +34,41 @@ static inline u32 __bitrev32(u32 x)
2345 +
2346 + #define __constant_bitrev32(x) \
2347 + ({ \
2348 +- u32 __x = x; \
2349 +- __x = (__x >> 16) | (__x << 16); \
2350 +- __x = ((__x & (u32)0xFF00FF00UL) >> 8) | ((__x & (u32)0x00FF00FFUL) << 8); \
2351 +- __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4); \
2352 +- __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2); \
2353 +- __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1); \
2354 +- __x; \
2355 ++ u32 ___x = x; \
2356 ++ ___x = (___x >> 16) | (___x << 16); \
2357 ++ ___x = ((___x & (u32)0xFF00FF00UL) >> 8) | ((___x & (u32)0x00FF00FFUL) << 8); \
2358 ++ ___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4); \
2359 ++ ___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2); \
2360 ++ ___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1); \
2361 ++ ___x; \
2362 + })
2363 +
2364 + #define __constant_bitrev16(x) \
2365 + ({ \
2366 +- u16 __x = x; \
2367 +- __x = (__x >> 8) | (__x << 8); \
2368 +- __x = ((__x & (u16)0xF0F0U) >> 4) | ((__x & (u16)0x0F0FU) << 4); \
2369 +- __x = ((__x & (u16)0xCCCCU) >> 2) | ((__x & (u16)0x3333U) << 2); \
2370 +- __x = ((__x & (u16)0xAAAAU) >> 1) | ((__x & (u16)0x5555U) << 1); \
2371 +- __x; \
2372 ++ u16 ___x = x; \
2373 ++ ___x = (___x >> 8) | (___x << 8); \
2374 ++ ___x = ((___x & (u16)0xF0F0U) >> 4) | ((___x & (u16)0x0F0FU) << 4); \
2375 ++ ___x = ((___x & (u16)0xCCCCU) >> 2) | ((___x & (u16)0x3333U) << 2); \
2376 ++ ___x = ((___x & (u16)0xAAAAU) >> 1) | ((___x & (u16)0x5555U) << 1); \
2377 ++ ___x; \
2378 + })
2379 +
2380 + #define __constant_bitrev8x4(x) \
2381 + ({ \
2382 +- u32 __x = x; \
2383 +- __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4); \
2384 +- __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2); \
2385 +- __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1); \
2386 +- __x; \
2387 ++ u32 ___x = x; \
2388 ++ ___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4); \
2389 ++ ___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2); \
2390 ++ ___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1); \
2391 ++ ___x; \
2392 + })
2393 +
2394 + #define __constant_bitrev8(x) \
2395 + ({ \
2396 +- u8 __x = x; \
2397 +- __x = (__x >> 4) | (__x << 4); \
2398 +- __x = ((__x & (u8)0xCCU) >> 2) | ((__x & (u8)0x33U) << 2); \
2399 +- __x = ((__x & (u8)0xAAU) >> 1) | ((__x & (u8)0x55U) << 1); \
2400 +- __x; \
2401 ++ u8 ___x = x; \
2402 ++ ___x = (___x >> 4) | (___x << 4); \
2403 ++ ___x = ((___x & (u8)0xCCU) >> 2) | ((___x & (u8)0x33U) << 2); \
2404 ++ ___x = ((___x & (u8)0xAAU) >> 1) | ((___x & (u8)0x55U) << 1); \
2405 ++ ___x; \
2406 + })
2407 +
2408 + #define bitrev32(x) \
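The bitrev rename from __x to ___x fixes statement-expression shadowing: when a macro argument itself mentions a variable named __x, the expansion's local declaration captures it and the reversal runs on an uninitialized value. A compact reproduction (GNU statement expressions, as the kernel uses; the macro here only swaps nibbles to keep the demo short):

#include <stdio.h>

#define NIBSWAP_BAD(x)  ({ unsigned char __x = (x);  /* shadows caller's __x */ \
        __x = (__x >> 4) | (__x << 4); __x; })

#define NIBSWAP_GOOD(x) ({ unsigned char ___x = (x); \
        ___x = (___x >> 4) | (___x << 4); ___x; })

int main(void)
{
    unsigned char __x = 0x0F;

    /* BAD: the expansion becomes 'unsigned char __x = (__x);', a
     * self-initialization of the *new* __x, so the caller's value
     * never reaches the computation and the result is indeterminate. */
    printf("bad:  0x%02x\n", NIBSWAP_BAD(__x));
    printf("good: 0x%02x\n", NIBSWAP_GOOD(__x));  /* 0xf0 as expected */
    return 0;
}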
2409 +diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
2410 +index 652f602167df..cc6b6532eb56 100644
2411 +--- a/include/linux/memcontrol.h
2412 ++++ b/include/linux/memcontrol.h
2413 +@@ -559,7 +559,10 @@ struct mem_cgroup *lock_page_memcg(struct page *page);
2414 + void __unlock_page_memcg(struct mem_cgroup *memcg);
2415 + void unlock_page_memcg(struct page *page);
2416 +
2417 +-/* idx can be of type enum memcg_stat_item or node_stat_item */
2418 ++/*
2419 ++ * idx can be of type enum memcg_stat_item or node_stat_item.
2420 ++ * Keep in sync with memcg_exact_page_state().
2421 ++ */
2422 + static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
2423 + int idx)
2424 + {
2425 +diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
2426 +index bbcfe2e5fd91..e8b92dee5a72 100644
2427 +--- a/include/linux/mlx5/driver.h
2428 ++++ b/include/linux/mlx5/driver.h
2429 +@@ -776,6 +776,8 @@ struct mlx5_pagefault {
2430 + };
2431 +
2432 + struct mlx5_td {
2433 ++ /* protects tirs list changes while tirs refresh */
2434 ++ struct mutex list_lock;
2435 + struct list_head tirs_list;
2436 + u32 tdn;
2437 + };
2438 +diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h b/include/linux/netfilter/nf_conntrack_proto_gre.h
2439 +index b8d95564bd53..14edb795ab43 100644
2440 +--- a/include/linux/netfilter/nf_conntrack_proto_gre.h
2441 ++++ b/include/linux/netfilter/nf_conntrack_proto_gre.h
2442 +@@ -21,6 +21,19 @@ struct nf_ct_gre_keymap {
2443 + struct nf_conntrack_tuple tuple;
2444 + };
2445 +
2446 ++enum grep_conntrack {
2447 ++ GRE_CT_UNREPLIED,
2448 ++ GRE_CT_REPLIED,
2449 ++ GRE_CT_MAX
2450 ++};
2451 ++
2452 ++struct netns_proto_gre {
2453 ++ struct nf_proto_net nf;
2454 ++ rwlock_t keymap_lock;
2455 ++ struct list_head keymap_list;
2456 ++ unsigned int gre_timeouts[GRE_CT_MAX];
2457 ++};
2458 ++
2459 + /* add new tuple->key_reply pair to keymap */
2460 + int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
2461 + struct nf_conntrack_tuple *t);
2462 +diff --git a/include/linux/string.h b/include/linux/string.h
2463 +index 4a5a0eb7df51..f58e1ef76572 100644
2464 +--- a/include/linux/string.h
2465 ++++ b/include/linux/string.h
2466 +@@ -143,6 +143,9 @@ extern void * memscan(void *,int,__kernel_size_t);
2467 + #ifndef __HAVE_ARCH_MEMCMP
2468 + extern int memcmp(const void *,const void *,__kernel_size_t);
2469 + #endif
2470 ++#ifndef __HAVE_ARCH_BCMP
2471 ++extern int bcmp(const void *,const void *,__kernel_size_t);
2472 ++#endif
2473 + #ifndef __HAVE_ARCH_MEMCHR
2474 + extern void * memchr(const void *,int,__kernel_size_t);
2475 + #endif
2476 +diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
2477 +index fab02133a919..3dc70adfe5f5 100644
2478 +--- a/include/linux/virtio_ring.h
2479 ++++ b/include/linux/virtio_ring.h
2480 +@@ -63,7 +63,7 @@ struct virtqueue;
2481 + /*
2482 + * Creates a virtqueue and allocates the descriptor ring. If
2483 + * may_reduce_num is set, then this may allocate a smaller ring than
2484 +- * expected. The caller should query virtqueue_get_ring_size to learn
2485 ++ * expected. The caller should query virtqueue_get_vring_size to learn
2486 + * the actual size of the ring.
2487 + */
2488 + struct virtqueue *vring_create_virtqueue(unsigned int index,
2489 +diff --git a/include/net/ip.h b/include/net/ip.h
2490 +index 71d31e4d4391..cfc3dd5ff085 100644
2491 +--- a/include/net/ip.h
2492 ++++ b/include/net/ip.h
2493 +@@ -648,7 +648,7 @@ int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
2494 + unsigned char __user *data, int optlen);
2495 + void ip_options_undo(struct ip_options *opt);
2496 + void ip_forward_options(struct sk_buff *skb);
2497 +-int ip_options_rcv_srr(struct sk_buff *skb);
2498 ++int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);
2499 +
2500 + /*
2501 + * Functions provided by ip_sockglue.c
2502 +diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
2503 +index 9b5fdc50519a..3f7b166262d7 100644
2504 +--- a/include/net/net_namespace.h
2505 ++++ b/include/net/net_namespace.h
2506 +@@ -57,6 +57,7 @@ struct net {
2507 + */
2508 + spinlock_t rules_mod_lock;
2509 +
2510 ++ u32 hash_mix;
2511 + atomic64_t cookie_gen;
2512 +
2513 + struct list_head list; /* list of network namespaces */
2514 +diff --git a/include/net/netns/hash.h b/include/net/netns/hash.h
2515 +index 16a842456189..d9b665151f3d 100644
2516 +--- a/include/net/netns/hash.h
2517 ++++ b/include/net/netns/hash.h
2518 +@@ -2,16 +2,10 @@
2519 + #ifndef __NET_NS_HASH_H__
2520 + #define __NET_NS_HASH_H__
2521 +
2522 +-#include <asm/cache.h>
2523 +-
2524 +-struct net;
2525 ++#include <net/net_namespace.h>
2526 +
2527 + static inline u32 net_hash_mix(const struct net *net)
2528 + {
2529 +-#ifdef CONFIG_NET_NS
2530 +- return (u32)(((unsigned long)net) >> ilog2(sizeof(*net)));
2531 +-#else
2532 +- return 0;
2533 +-#endif
2534 ++ return net->hash_mix;
2535 + }
2536 + #endif
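Deriving net_hash_mix() from the struct net pointer leaked kernel-address bits into externally observable hash-bucket placement (and degenerated to a constant without CONFIG_NET_NS); the patch replaces it with per-namespace entropy drawn at setup_net() time. A userspace sketch of the new scheme, with rand() standing in for get_random_bytes():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

struct net { uint32_t hash_mix; };

/* Seed the mix with entropy instead of deriving it from the address. */
static void setup_net(struct net *net)
{
    net->hash_mix = (uint32_t)rand();
}

static uint32_t net_hash_mix(const struct net *net)
{
    return net->hash_mix;
}

int main(void)
{
    struct net a, b;

    srand((unsigned)time(NULL));
    setup_net(&a);
    setup_net(&b);
    printf("mix(a)=%08x mix(b)=%08x\n", net_hash_mix(&a), net_hash_mix(&b));
    return 0;
}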
2537 +diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
2538 +index 811009ebacd4..379e89c706c9 100644
2539 +--- a/kernel/irq/chip.c
2540 ++++ b/kernel/irq/chip.c
2541 +@@ -1384,6 +1384,10 @@ int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
2542 + int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
2543 + {
2544 + data = data->parent_data;
2545 ++
2546 ++ if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE)
2547 ++ return 0;
2548 ++
2549 + if (data->chip->irq_set_wake)
2550 + return data->chip->irq_set_wake(data, on);
2551 +
2552 +diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
2553 +index ba454cba4069..8e009cee6517 100644
2554 +--- a/kernel/irq/irqdesc.c
2555 ++++ b/kernel/irq/irqdesc.c
2556 +@@ -554,6 +554,7 @@ int __init early_irq_init(void)
2557 + alloc_masks(&desc[i], node);
2558 + raw_spin_lock_init(&desc[i].lock);
2559 + lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
2560 ++ mutex_init(&desc[i].request_mutex);
2561 + desc_set_defaults(i, &desc[i], node, NULL, NULL);
2562 + }
2563 + return arch_early_irq_init();
2564 +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
2565 +index f7c375d1e601..640094391169 100644
2566 +--- a/kernel/sched/fair.c
2567 ++++ b/kernel/sched/fair.c
2568 +@@ -7437,10 +7437,10 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
2569 + if (cfs_rq->last_h_load_update == now)
2570 + return;
2571 +
2572 +- cfs_rq->h_load_next = NULL;
2573 ++ WRITE_ONCE(cfs_rq->h_load_next, NULL);
2574 + for_each_sched_entity(se) {
2575 + cfs_rq = cfs_rq_of(se);
2576 +- cfs_rq->h_load_next = se;
2577 ++ WRITE_ONCE(cfs_rq->h_load_next, se);
2578 + if (cfs_rq->last_h_load_update == now)
2579 + break;
2580 + }
2581 +@@ -7450,7 +7450,7 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
2582 + cfs_rq->last_h_load_update = now;
2583 + }
2584 +
2585 +- while ((se = cfs_rq->h_load_next) != NULL) {
2586 ++ while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) {
2587 + load = cfs_rq->h_load;
2588 + load = div64_ul(load * se->avg.load_avg,
2589 + cfs_rq_load_avg(cfs_rq) + 1);
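The fair-scheduler hunk annotates a pointer that one CPU writes while another walks it locklessly: WRITE_ONCE()/READ_ONCE() force single, non-torn accesses and stop the compiler from caching or re-fetching the pointer mid-walk. A rough C11 analogue using relaxed atomics (a userspace approximation, not the kernel macros):

#include <stdatomic.h>
#include <stddef.h>

struct se;                              /* opaque scheduling entity */
static _Atomic(struct se *) h_load_next;

static void publish_chain(struct se *se)
{
    /* analogue of WRITE_ONCE(cfs_rq->h_load_next, se) */
    atomic_store_explicit(&h_load_next, se, memory_order_relaxed);
}

static struct se *read_chain(void)
{
    /* analogue of READ_ONCE(cfs_rq->h_load_next): one non-torn load
     * the compiler may neither cache nor re-issue */
    return atomic_load_explicit(&h_load_next, memory_order_relaxed);
}

int main(void)
{
    publish_chain(NULL);
    return read_chain() == NULL ? 0 : 1;
}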
2590 +diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
2591 +index fa5de5e8de61..fdeb9bc6affb 100644
2592 +--- a/kernel/time/alarmtimer.c
2593 ++++ b/kernel/time/alarmtimer.c
2594 +@@ -597,7 +597,7 @@ static ktime_t alarm_timer_remaining(struct k_itimer *timr, ktime_t now)
2595 + {
2596 + struct alarm *alarm = &timr->it.alarm.alarmtimer;
2597 +
2598 +- return ktime_sub(now, alarm->node.expires);
2599 ++ return ktime_sub(alarm->node.expires, now);
2600 + }
2601 +
2602 + /**
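The alarmtimer fix swaps reversed operands: remaining time is expiry minus now, and with the arguments inverted every result came back negated. Trivially checkable:

#include <stdint.h>
#include <stdio.h>

typedef int64_t ktime_t;    /* nanoseconds, as in the kernel */

static ktime_t ktime_sub(ktime_t a, ktime_t b) { return a - b; }

int main(void)
{
    ktime_t now = 1000, expires = 1500;

    printf("buggy:   %lld\n", (long long)ktime_sub(now, expires)); /* -500 */
    printf("correct: %lld\n", (long long)ktime_sub(expires, now)); /*  500 */
    return 0;
}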
2603 +diff --git a/lib/string.c b/lib/string.c
2604 +index 2c0900a5d51a..72125fd5b4a6 100644
2605 +--- a/lib/string.c
2606 ++++ b/lib/string.c
2607 +@@ -865,6 +865,26 @@ __visible int memcmp(const void *cs, const void *ct, size_t count)
2608 + EXPORT_SYMBOL(memcmp);
2609 + #endif
2610 +
2611 ++#ifndef __HAVE_ARCH_BCMP
2612 ++/**
2613 ++ * bcmp - returns 0 if and only if the buffers have identical contents.
2614 ++ * @a: pointer to first buffer.
2615 ++ * @b: pointer to second buffer.
2616 ++ * @len: size of buffers.
2617 ++ *
2618 ++ * The sign or magnitude of a non-zero return value has no particular
2619 ++ * meaning, and architectures may implement their own more efficient bcmp(). So
2620 ++ * while this particular implementation is a simple (tail) call to memcmp, do
2621 ++ * not rely on anything but whether the return value is zero or non-zero.
2622 ++ */
2623 ++#undef bcmp
2624 ++int bcmp(const void *a, const void *b, size_t len)
2625 ++{
2626 ++ return memcmp(a, b, len);
2627 ++}
2628 ++EXPORT_SYMBOL(bcmp);
2629 ++#endif
2630 ++
2631 + #ifndef __HAVE_ARCH_MEMSCAN
2632 + /**
2633 + * memscan - Find a character in an area of memory.
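The new bcmp() exists because compilers may lower equality-only memcmp() calls into bcmp() calls, and the kernel must then supply one; since only the zero/non-zero contract matters, a tail call to memcmp() is a valid implementation. The userspace equivalent:

#include <stdio.h>
#include <string.h>

/* Only the zero/non-zero result is meaningful; sign and magnitude are not. */
static int my_bcmp(const void *a, const void *b, size_t len)
{
    return memcmp(a, b, len);
}

int main(void)
{
    printf("equal:  %d\n", my_bcmp("abc", "abc", 3) == 0);
    printf("differ: %d\n", my_bcmp("abc", "abd", 3) == 0);
    return 0;
}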
2634 +diff --git a/mm/huge_memory.c b/mm/huge_memory.c
2635 +index d2cd70cfaa90..7d08e89361ee 100644
2636 +--- a/mm/huge_memory.c
2637 ++++ b/mm/huge_memory.c
2638 +@@ -734,6 +734,21 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
2639 + spinlock_t *ptl;
2640 +
2641 + ptl = pmd_lock(mm, pmd);
2642 ++ if (!pmd_none(*pmd)) {
2643 ++ if (write) {
2644 ++ if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
2645 ++ WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
2646 ++ goto out_unlock;
2647 ++ }
2648 ++ entry = pmd_mkyoung(*pmd);
2649 ++ entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
2650 ++ if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
2651 ++ update_mmu_cache_pmd(vma, addr, pmd);
2652 ++ }
2653 ++
2654 ++ goto out_unlock;
2655 ++ }
2656 ++
2657 + entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
2658 + if (pfn_t_devmap(pfn))
2659 + entry = pmd_mkdevmap(entry);
2660 +@@ -745,11 +760,16 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
2661 + if (pgtable) {
2662 + pgtable_trans_huge_deposit(mm, pmd, pgtable);
2663 + mm_inc_nr_ptes(mm);
2664 ++ pgtable = NULL;
2665 + }
2666 +
2667 + set_pmd_at(mm, addr, pmd, entry);
2668 + update_mmu_cache_pmd(vma, addr, pmd);
2669 ++
2670 ++out_unlock:
2671 + spin_unlock(ptl);
2672 ++ if (pgtable)
2673 ++ pte_free(mm, pgtable);
2674 + }
2675 +
2676 + vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
2677 +@@ -800,6 +820,20 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
2678 + spinlock_t *ptl;
2679 +
2680 + ptl = pud_lock(mm, pud);
2681 ++ if (!pud_none(*pud)) {
2682 ++ if (write) {
2683 ++ if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
2684 ++ WARN_ON_ONCE(!is_huge_zero_pud(*pud));
2685 ++ goto out_unlock;
2686 ++ }
2687 ++ entry = pud_mkyoung(*pud);
2688 ++ entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
2689 ++ if (pudp_set_access_flags(vma, addr, pud, entry, 1))
2690 ++ update_mmu_cache_pud(vma, addr, pud);
2691 ++ }
2692 ++ goto out_unlock;
2693 ++ }
2694 ++
2695 + entry = pud_mkhuge(pfn_t_pud(pfn, prot));
2696 + if (pfn_t_devmap(pfn))
2697 + entry = pud_mkdevmap(entry);
2698 +@@ -809,6 +843,8 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
2699 + }
2700 + set_pud_at(mm, addr, pud, entry);
2701 + update_mmu_cache_pud(vma, addr, pud);
2702 ++
2703 ++out_unlock:
2704 + spin_unlock(ptl);
2705 + }
2706 +
2707 +diff --git a/mm/memcontrol.c b/mm/memcontrol.c
2708 +index 7c712c4565e6..7e7cc0cd89fe 100644
2709 +--- a/mm/memcontrol.c
2710 ++++ b/mm/memcontrol.c
2711 +@@ -3897,6 +3897,22 @@ struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
2712 + return &memcg->cgwb_domain;
2713 + }
2714 +
2715 ++/*
2716 ++ * idx can be of type enum memcg_stat_item or node_stat_item.
2717 ++ * Keep in sync with memcg_exact_page().
2718 ++ */
2719 ++static unsigned long memcg_exact_page_state(struct mem_cgroup *memcg, int idx)
2720 ++{
2721 ++ long x = atomic_long_read(&memcg->stat[idx]);
2722 ++ int cpu;
2723 ++
2724 ++ for_each_online_cpu(cpu)
2725 ++ x += per_cpu_ptr(memcg->stat_cpu, cpu)->count[idx];
2726 ++ if (x < 0)
2727 ++ x = 0;
2728 ++ return x;
2729 ++}
2730 ++
2731 + /**
2732 + * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
2733 + * @wb: bdi_writeback in question
2734 +@@ -3922,10 +3938,10 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
2735 + struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
2736 + struct mem_cgroup *parent;
2737 +
2738 +- *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
2739 ++ *pdirty = memcg_exact_page_state(memcg, NR_FILE_DIRTY);
2740 +
2741 + /* this should eventually include NR_UNSTABLE_NFS */
2742 +- *pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
2743 ++ *pwriteback = memcg_exact_page_state(memcg, NR_WRITEBACK);
2744 + *pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
2745 + (1 << LRU_ACTIVE_FILE));
2746 + *pheadroom = PAGE_COUNTER_MAX;
2747 +diff --git a/net/core/dev.c b/net/core/dev.c
2748 +index 5c8c0a572ee9..d47554307a6d 100644
2749 +--- a/net/core/dev.c
2750 ++++ b/net/core/dev.c
2751 +@@ -4959,8 +4959,10 @@ static inline void __netif_receive_skb_list_ptype(struct list_head *head,
2752 + if (pt_prev->list_func != NULL)
2753 + pt_prev->list_func(head, pt_prev, orig_dev);
2754 + else
2755 +- list_for_each_entry_safe(skb, next, head, list)
2756 ++ list_for_each_entry_safe(skb, next, head, list) {
2757 ++ skb_list_del_init(skb);
2758 + pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2759 ++ }
2760 + }
2761 +
2762 + static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
2763 +diff --git a/net/core/ethtool.c b/net/core/ethtool.c
2764 +index aeabc4831fca..7cc97f43f138 100644
2765 +--- a/net/core/ethtool.c
2766 ++++ b/net/core/ethtool.c
2767 +@@ -1863,11 +1863,16 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
2768 + WARN_ON_ONCE(!ret);
2769 +
2770 + gstrings.len = ret;
2771 +- data = vzalloc(array_size(gstrings.len, ETH_GSTRING_LEN));
2772 +- if (gstrings.len && !data)
2773 +- return -ENOMEM;
2774 +
2775 +- __ethtool_get_strings(dev, gstrings.string_set, data);
2776 ++ if (gstrings.len) {
2777 ++ data = vzalloc(array_size(gstrings.len, ETH_GSTRING_LEN));
2778 ++ if (!data)
2779 ++ return -ENOMEM;
2780 ++
2781 ++ __ethtool_get_strings(dev, gstrings.string_set, data);
2782 ++ } else {
2783 ++ data = NULL;
2784 ++ }
2785 +
2786 + ret = -EFAULT;
2787 + if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
2788 +@@ -1963,11 +1968,15 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
2789 + return -EFAULT;
2790 +
2791 + stats.n_stats = n_stats;
2792 +- data = vzalloc(array_size(n_stats, sizeof(u64)));
2793 +- if (n_stats && !data)
2794 +- return -ENOMEM;
2795 +
2796 +- ops->get_ethtool_stats(dev, &stats, data);
2797 ++ if (n_stats) {
2798 ++ data = vzalloc(array_size(n_stats, sizeof(u64)));
2799 ++ if (!data)
2800 ++ return -ENOMEM;
2801 ++ ops->get_ethtool_stats(dev, &stats, data);
2802 ++ } else {
2803 ++ data = NULL;
2804 ++ }
2805 +
2806 + ret = -EFAULT;
2807 + if (copy_to_user(useraddr, &stats, sizeof(stats)))
2808 +@@ -2007,16 +2016,21 @@ static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr)
2809 + return -EFAULT;
2810 +
2811 + stats.n_stats = n_stats;
2812 +- data = vzalloc(array_size(n_stats, sizeof(u64)));
2813 +- if (n_stats && !data)
2814 +- return -ENOMEM;
2815 +
2816 +- if (dev->phydev && !ops->get_ethtool_phy_stats) {
2817 +- ret = phy_ethtool_get_stats(dev->phydev, &stats, data);
2818 +- if (ret < 0)
2819 +- return ret;
2820 ++ if (n_stats) {
2821 ++ data = vzalloc(array_size(n_stats, sizeof(u64)));
2822 ++ if (!data)
2823 ++ return -ENOMEM;
2824 ++
2825 ++ if (dev->phydev && !ops->get_ethtool_phy_stats) {
2826 ++ ret = phy_ethtool_get_stats(dev->phydev, &stats, data);
2827 ++ if (ret < 0)
2828 ++ goto out;
2829 ++ } else {
2830 ++ ops->get_ethtool_phy_stats(dev, &stats, data);
2831 ++ }
2832 + } else {
2833 +- ops->get_ethtool_phy_stats(dev, &stats, data);
2834 ++ data = NULL;
2835 + }
2836 +
2837 + ret = -EFAULT;
2838 +diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
2839 +index 670c84b1bfc2..7320f0844a50 100644
2840 +--- a/net/core/net_namespace.c
2841 ++++ b/net/core/net_namespace.c
2842 +@@ -304,6 +304,7 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
2843 +
2844 + refcount_set(&net->count, 1);
2845 + refcount_set(&net->passive, 1);
2846 ++ get_random_bytes(&net->hash_mix, sizeof(u32));
2847 + net->dev_base_seq = 1;
2848 + net->user_ns = user_ns;
2849 + idr_init(&net->netns_ids);
2850 +diff --git a/net/core/skbuff.c b/net/core/skbuff.c
2851 +index 8656b1e20d35..ceee28e184af 100644
2852 +--- a/net/core/skbuff.c
2853 ++++ b/net/core/skbuff.c
2854 +@@ -3832,7 +3832,7 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
2855 + unsigned int delta_truesize;
2856 + struct sk_buff *lp;
2857 +
2858 +- if (unlikely(p->len + len >= 65536))
2859 ++ if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush))
2860 + return -E2BIG;
2861 +
2862 + lp = NAPI_GRO_CB(p)->last;
2863 +diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
2864 +index f199945f6e4a..3c734832bb7c 100644
2865 +--- a/net/ipv4/ip_gre.c
2866 ++++ b/net/ipv4/ip_gre.c
2867 +@@ -260,7 +260,6 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
2868 + struct net *net = dev_net(skb->dev);
2869 + struct metadata_dst *tun_dst = NULL;
2870 + struct erspan_base_hdr *ershdr;
2871 +- struct erspan_metadata *pkt_md;
2872 + struct ip_tunnel_net *itn;
2873 + struct ip_tunnel *tunnel;
2874 + const struct iphdr *iph;
2875 +@@ -283,9 +282,6 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
2876 + if (unlikely(!pskb_may_pull(skb, len)))
2877 + return PACKET_REJECT;
2878 +
2879 +- ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
2880 +- pkt_md = (struct erspan_metadata *)(ershdr + 1);
2881 +-
2882 + if (__iptunnel_pull_header(skb,
2883 + len,
2884 + htons(ETH_P_TEB),
2885 +@@ -293,8 +289,9 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
2886 + goto drop;
2887 +
2888 + if (tunnel->collect_md) {
2889 ++ struct erspan_metadata *pkt_md, *md;
2890 + struct ip_tunnel_info *info;
2891 +- struct erspan_metadata *md;
2892 ++ unsigned char *gh;
2893 + __be64 tun_id;
2894 + __be16 flags;
2895 +
2896 +@@ -307,6 +304,14 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
2897 + if (!tun_dst)
2898 + return PACKET_REJECT;
2899 +
2900 ++ /* skb can be uncloned in __iptunnel_pull_header, so
2901 ++ * old pkt_md is no longer valid and we need to reset
2902 ++ * it
2903 ++ */
2904 ++ gh = skb_network_header(skb) +
2905 ++ skb_network_header_len(skb);
2906 ++ pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
2907 ++ sizeof(*ershdr));
2908 + md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
2909 + md->version = ver;
2910 + md2 = &md->u.md2;
2911 +diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
2912 +index bd8ef4f87c79..c3a0683e83df 100644
2913 +--- a/net/ipv4/ip_input.c
2914 ++++ b/net/ipv4/ip_input.c
2915 +@@ -258,11 +258,10 @@ int ip_local_deliver(struct sk_buff *skb)
2916 + ip_local_deliver_finish);
2917 + }
2918 +
2919 +-static inline bool ip_rcv_options(struct sk_buff *skb)
2920 ++static inline bool ip_rcv_options(struct sk_buff *skb, struct net_device *dev)
2921 + {
2922 + struct ip_options *opt;
2923 + const struct iphdr *iph;
2924 +- struct net_device *dev = skb->dev;
2925 +
2926 + /* It looks as overkill, because not all
2927 + IP options require packet mangling.
2928 +@@ -298,7 +297,7 @@ static inline bool ip_rcv_options(struct sk_buff *skb)
2929 + }
2930 + }
2931 +
2932 +- if (ip_options_rcv_srr(skb))
2933 ++ if (ip_options_rcv_srr(skb, dev))
2934 + goto drop;
2935 + }
2936 +
2937 +@@ -354,7 +353,7 @@ static int ip_rcv_finish_core(struct net *net, struct sock *sk,
2938 + }
2939 + #endif
2940 +
2941 +- if (iph->ihl > 5 && ip_rcv_options(skb))
2942 ++ if (iph->ihl > 5 && ip_rcv_options(skb, dev))
2943 + goto drop;
2944 +
2945 + rt = skb_rtable(skb);
2946 +diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
2947 +index 32a35043c9f5..3db31bb9df50 100644
2948 +--- a/net/ipv4/ip_options.c
2949 ++++ b/net/ipv4/ip_options.c
2950 +@@ -612,7 +612,7 @@ void ip_forward_options(struct sk_buff *skb)
2951 + }
2952 + }
2953 +
2954 +-int ip_options_rcv_srr(struct sk_buff *skb)
2955 ++int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev)
2956 + {
2957 + struct ip_options *opt = &(IPCB(skb)->opt);
2958 + int srrspace, srrptr;
2959 +@@ -647,7 +647,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
2960 +
2961 + orefdst = skb->_skb_refdst;
2962 + skb_dst_set(skb, NULL);
2963 +- err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, skb->dev);
2964 ++ err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, dev);
2965 + rt2 = skb_rtable(skb);
2966 + if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) {
2967 + skb_dst_drop(skb);
2968 +diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
2969 +index ca61e2a659e7..5205c5a5d8d5 100644
2970 +--- a/net/ipv4/tcp_dctcp.c
2971 ++++ b/net/ipv4/tcp_dctcp.c
2972 +@@ -66,11 +66,6 @@ static unsigned int dctcp_alpha_on_init __read_mostly = DCTCP_MAX_ALPHA;
2973 + module_param(dctcp_alpha_on_init, uint, 0644);
2974 + MODULE_PARM_DESC(dctcp_alpha_on_init, "parameter for initial alpha value");
2975 +
2976 +-static unsigned int dctcp_clamp_alpha_on_loss __read_mostly;
2977 +-module_param(dctcp_clamp_alpha_on_loss, uint, 0644);
2978 +-MODULE_PARM_DESC(dctcp_clamp_alpha_on_loss,
2979 +- "parameter for clamping alpha on loss");
2980 +-
2981 + static struct tcp_congestion_ops dctcp_reno;
2982 +
2983 + static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca)
2984 +@@ -211,21 +206,23 @@ static void dctcp_update_alpha(struct sock *sk, u32 flags)
2985 + }
2986 + }
2987 +
2988 +-static void dctcp_state(struct sock *sk, u8 new_state)
2989 ++static void dctcp_react_to_loss(struct sock *sk)
2990 + {
2991 +- if (dctcp_clamp_alpha_on_loss && new_state == TCP_CA_Loss) {
2992 +- struct dctcp *ca = inet_csk_ca(sk);
2993 ++ struct dctcp *ca = inet_csk_ca(sk);
2994 ++ struct tcp_sock *tp = tcp_sk(sk);
2995 +
2996 +- /* If this extension is enabled, we clamp dctcp_alpha to
2997 +- * max on packet loss; the motivation is that dctcp_alpha
2998 +- * is an indicator to the extend of congestion and packet
2999 +- * loss is an indicator of extreme congestion; setting
3000 +- * this in practice turned out to be beneficial, and
3001 +- * effectively assumes total congestion which reduces the
3002 +- * window by half.
3003 +- */
3004 +- ca->dctcp_alpha = DCTCP_MAX_ALPHA;
3005 +- }
3006 ++ ca->loss_cwnd = tp->snd_cwnd;
3007 ++ tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U);
3008 ++}
3009 ++
3010 ++static void dctcp_state(struct sock *sk, u8 new_state)
3011 ++{
3012 ++ if (new_state == TCP_CA_Recovery &&
3013 ++ new_state != inet_csk(sk)->icsk_ca_state)
3014 ++ dctcp_react_to_loss(sk);
3015 ++ /* We handle RTO in dctcp_cwnd_event to ensure that we perform only
3016 ++ * one loss-adjustment per RTT.
3017 ++ */
3018 + }
3019 +
3020 + static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
3021 +@@ -237,6 +234,9 @@ static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
3022 + case CA_EVENT_ECN_NO_CE:
3023 + dctcp_ce_state_1_to_0(sk);
3024 + break;
3025 ++ case CA_EVENT_LOSS:
3026 ++ dctcp_react_to_loss(sk);
3027 ++ break;
3028 + default:
3029 + /* Don't care for the rest. */
3030 + break;
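dctcp_react_to_loss() above applies the conventional multiplicative decrease, ssthresh = max(cwnd/2, 2), and the surrounding plumbing ensures it fires once per RTT (on entering Recovery, or on RTO via CA_EVENT_LOSS). The arithmetic in isolation:

#include <stdio.h>

static unsigned int react_to_loss(unsigned int snd_cwnd)
{
    unsigned int half = snd_cwnd >> 1;

    return half > 2 ? half : 2;   /* ssthresh = max(cwnd/2, 2) */
}

int main(void)
{
    printf("cwnd 10 -> ssthresh %u\n", react_to_loss(10)); /* 5 */
    printf("cwnd 3  -> ssthresh %u\n", react_to_loss(3));  /* 2 */
    return 0;
}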
3031 +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
3032 +index 30fdf891940b..11101cf8693b 100644
3033 +--- a/net/ipv4/tcp_ipv4.c
3034 ++++ b/net/ipv4/tcp_ipv4.c
3035 +@@ -2490,7 +2490,8 @@ static void __net_exit tcp_sk_exit(struct net *net)
3036 + {
3037 + int cpu;
3038 +
3039 +- module_put(net->ipv4.tcp_congestion_control->owner);
3040 ++ if (net->ipv4.tcp_congestion_control)
3041 ++ module_put(net->ipv4.tcp_congestion_control->owner);
3042 +
3043 + for_each_possible_cpu(cpu)
3044 + inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
3045 +diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
3046 +index faed98dab913..c4a7db62658e 100644
3047 +--- a/net/ipv6/ip6_gre.c
3048 ++++ b/net/ipv6/ip6_gre.c
3049 +@@ -540,11 +540,10 @@ static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
3050 + return PACKET_REJECT;
3051 + }
3052 +
3053 +-static int ip6erspan_rcv(struct sk_buff *skb, int gre_hdr_len,
3054 +- struct tnl_ptk_info *tpi)
3055 ++static int ip6erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
3056 ++ int gre_hdr_len)
3057 + {
3058 + struct erspan_base_hdr *ershdr;
3059 +- struct erspan_metadata *pkt_md;
3060 + const struct ipv6hdr *ipv6h;
3061 + struct erspan_md2 *md2;
3062 + struct ip6_tnl *tunnel;
3063 +@@ -563,18 +562,16 @@ static int ip6erspan_rcv(struct sk_buff *skb, int gre_hdr_len,
3064 + if (unlikely(!pskb_may_pull(skb, len)))
3065 + return PACKET_REJECT;
3066 +
3067 +- ershdr = (struct erspan_base_hdr *)skb->data;
3068 +- pkt_md = (struct erspan_metadata *)(ershdr + 1);
3069 +-
3070 + if (__iptunnel_pull_header(skb, len,
3071 + htons(ETH_P_TEB),
3072 + false, false) < 0)
3073 + return PACKET_REJECT;
3074 +
3075 + if (tunnel->parms.collect_md) {
3076 ++ struct erspan_metadata *pkt_md, *md;
3077 + struct metadata_dst *tun_dst;
3078 + struct ip_tunnel_info *info;
3079 +- struct erspan_metadata *md;
3080 ++ unsigned char *gh;
3081 + __be64 tun_id;
3082 + __be16 flags;
3083 +
3084 +@@ -587,6 +584,14 @@ static int ip6erspan_rcv(struct sk_buff *skb, int gre_hdr_len,
3085 + if (!tun_dst)
3086 + return PACKET_REJECT;
3087 +
3088 ++ /* skb can be uncloned in __iptunnel_pull_header, so
3089 ++ * old pkt_md is no longer valid and we need to reset
3090 ++ * it
3091 ++ */
3092 ++ gh = skb_network_header(skb) +
3093 ++ skb_network_header_len(skb);
3094 ++ pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
3095 ++ sizeof(*ershdr));
3096 + info = &tun_dst->u.tun_info;
3097 + md = ip_tunnel_info_opts(info);
3098 + md->version = ver;
3099 +@@ -623,7 +628,7 @@ static int gre_rcv(struct sk_buff *skb)
3100 +
3101 + if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
3102 + tpi.proto == htons(ETH_P_ERSPAN2))) {
3103 +- if (ip6erspan_rcv(skb, hdr_len, &tpi) == PACKET_RCVD)
3104 ++ if (ip6erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
3105 + return 0;
3106 + goto out;
3107 + }
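/* Editor's aside (not part of the patch): the ip6erspan_rcv() fix is a
 * stale-pointer repair -- __iptunnel_pull_header() may unclone the skb,
 * which can move its data, so a pointer computed before the call has to be
 * rebuilt from the re-read header base afterwards. The same hazard exists
 * in any C code that keeps a raw pointer across a call that may reallocate
 * the underlying buffer; a self-contained illustration with realloc(): */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    char *buf = malloc(16);

    if (!buf)
        return 1;
    strcpy(buf, "hdr:payload");

    size_t payload_off = 4;            /* remember an offset, not a pointer */
    char *payload = buf + payload_off;

    char *tmp = realloc(buf, 1 << 20); /* may move the allocation */

    if (!tmp) {
        free(buf);
        return 1;
    }
    buf = tmp;
    /* 'payload' may now dangle; recompute it from the new base */
    payload = buf + payload_off;
    printf("%s\n", payload);
    free(buf);
    return 0;
}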
3108 +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
3109 +index 0bb87f3a10c7..eed9231c90ad 100644
3110 +--- a/net/ipv6/ip6_output.c
3111 ++++ b/net/ipv6/ip6_output.c
3112 +@@ -587,7 +587,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
3113 + inet6_sk(skb->sk) : NULL;
3114 + struct ipv6hdr *tmp_hdr;
3115 + struct frag_hdr *fh;
3116 +- unsigned int mtu, hlen, left, len;
3117 ++ unsigned int mtu, hlen, left, len, nexthdr_offset;
3118 + int hroom, troom;
3119 + __be32 frag_id;
3120 + int ptr, offset = 0, err = 0;
3121 +@@ -598,6 +598,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
3122 + goto fail;
3123 + hlen = err;
3124 + nexthdr = *prevhdr;
3125 ++ nexthdr_offset = prevhdr - skb_network_header(skb);
3126 +
3127 + mtu = ip6_skb_dst_mtu(skb);
3128 +
3129 +@@ -632,6 +633,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
3130 + (err = skb_checksum_help(skb)))
3131 + goto fail;
3132 +
3133 ++ prevhdr = skb_network_header(skb) + nexthdr_offset;
3134 + hroom = LL_RESERVED_SPACE(rt->dst.dev);
3135 + if (skb_has_frag_list(skb)) {
3136 + unsigned int first_len = skb_pagelen(skb);
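/* Editor's aside (not part of the patch): same stale-pointer pattern as
 * the ip6_gre fix above -- skb_checksum_help() may reallocate the skb
 * head, so ip6_fragment() now saves prevhdr as an offset from the network
 * header before the call and rebuilds the pointer afterwards. */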
3137 +diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
3138 +index 0c6403cf8b52..ade1390c6348 100644
3139 +--- a/net/ipv6/ip6_tunnel.c
3140 ++++ b/net/ipv6/ip6_tunnel.c
3141 +@@ -627,7 +627,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
3142 + rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
3143 + eiph->daddr, eiph->saddr, 0, 0,
3144 + IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
3145 +- if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL) {
3146 ++ if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL6) {
3147 + if (!IS_ERR(rt))
3148 + ip_rt_put(rt);
3149 + goto out;
3150 +@@ -636,7 +636,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
3151 + } else {
3152 + if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
3153 + skb2->dev) ||
3154 +- skb_dst(skb2)->dev->type != ARPHRD_TUNNEL)
3155 ++ skb_dst(skb2)->dev->type != ARPHRD_TUNNEL6)
3156 + goto out;
3157 + }
3158 +
3159 +diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
3160 +index de9aa5cb295c..8f6cf8e6b5c1 100644
3161 +--- a/net/ipv6/sit.c
3162 ++++ b/net/ipv6/sit.c
3163 +@@ -669,6 +669,10 @@ static int ipip6_rcv(struct sk_buff *skb)
3164 + !net_eq(tunnel->net, dev_net(tunnel->dev))))
3165 + goto out;
3166 +
3167 ++ /* skb can be uncloned in iptunnel_pull_header, so
3168 ++ * old iph is no longer valid
3169 ++ */
3170 ++ iph = (const struct iphdr *)skb_mac_header(skb);
3171 + err = IP_ECN_decapsulate(iph, skb);
3172 + if (unlikely(err)) {
3173 + if (log_ecn_error)
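/* Editor's aside (not part of the patch): the same pattern once more --
 * after iptunnel_pull_header() the cached iph may point into freed data,
 * so ipip6_rcv() re-derives it from skb_mac_header() before the ECN
 * decapsulation step. */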
3174 +diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
3175 +index 571d824e4e24..b919db02c7f9 100644
3176 +--- a/net/kcm/kcmsock.c
3177 ++++ b/net/kcm/kcmsock.c
3178 +@@ -2054,14 +2054,14 @@ static int __init kcm_init(void)
3179 + if (err)
3180 + goto fail;
3181 +
3182 +- err = sock_register(&kcm_family_ops);
3183 +- if (err)
3184 +- goto sock_register_fail;
3185 +-
3186 + err = register_pernet_device(&kcm_net_ops);
3187 + if (err)
3188 + goto net_ops_fail;
3189 +
3190 ++ err = sock_register(&kcm_family_ops);
3191 ++ if (err)
3192 ++ goto sock_register_fail;
3193 ++
3194 + err = kcm_proc_init();
3195 + if (err)
3196 + goto proc_init_fail;
3197 +@@ -2069,12 +2069,12 @@ static int __init kcm_init(void)
3198 + return 0;
3199 +
3200 + proc_init_fail:
3201 +- unregister_pernet_device(&kcm_net_ops);
3202 +-
3203 +-net_ops_fail:
3204 + sock_unregister(PF_KCM);
3205 +
3206 + sock_register_fail:
3207 ++ unregister_pernet_device(&kcm_net_ops);
3208 ++
3209 ++net_ops_fail:
3210 + proto_unregister(&kcm_proto);
3211 +
3212 + fail:
3213 +@@ -2090,8 +2090,8 @@ fail:
3214 + static void __exit kcm_exit(void)
3215 + {
3216 + kcm_proc_exit();
3217 +- unregister_pernet_device(&kcm_net_ops);
3218 + sock_unregister(PF_KCM);
3219 ++ unregister_pernet_device(&kcm_net_ops);
3220 + proto_unregister(&kcm_proto);
3221 + destroy_workqueue(kcm_wq);
3222 +
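/* Editor's aside (not part of the patch): the kcm change is an init/exit
 * ordering fix -- sock_register() now comes after register_pernet_device(),
 * and the error-unwind labels (and kcm_exit()) release resources in the
 * exact reverse of acquisition order. The classic goto-based idiom, as a
 * runnable sketch with stubbed-out setup steps: */

#include <stdio.h>

static int step_a(void)  { puts("a up");   return 0; }
static int step_b(void)  { puts("b up");   return 0; }
static int step_c(void)  { puts("c up");   return -1; /* simulate failure */ }
static void undo_a(void) { puts("a down"); }
static void undo_b(void) { puts("b down"); }

static int init_all(void)
{
    int err;

    err = step_a();
    if (err)
        goto fail;
    err = step_b();
    if (err)
        goto out_a;
    err = step_c();
    if (err)
        goto out_b;
    return 0;

out_b:  /* unwind strictly mirrors setup order, newest first */
    undo_b();
out_a:
    undo_a();
fail:
    return err;
}

int main(void)
{
    return init_all() ? 1 : 0;
}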
3223 +diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
3224 +index 650eb4fba2c5..841c472aae1c 100644
3225 +--- a/net/netfilter/nf_conntrack_proto_gre.c
3226 ++++ b/net/netfilter/nf_conntrack_proto_gre.c
3227 +@@ -43,24 +43,12 @@
3228 + #include <linux/netfilter/nf_conntrack_proto_gre.h>
3229 + #include <linux/netfilter/nf_conntrack_pptp.h>
3230 +
3231 +-enum grep_conntrack {
3232 +- GRE_CT_UNREPLIED,
3233 +- GRE_CT_REPLIED,
3234 +- GRE_CT_MAX
3235 +-};
3236 +-
3237 + static const unsigned int gre_timeouts[GRE_CT_MAX] = {
3238 + [GRE_CT_UNREPLIED] = 30*HZ,
3239 + [GRE_CT_REPLIED] = 180*HZ,
3240 + };
3241 +
3242 + static unsigned int proto_gre_net_id __read_mostly;
3243 +-struct netns_proto_gre {
3244 +- struct nf_proto_net nf;
3245 +- rwlock_t keymap_lock;
3246 +- struct list_head keymap_list;
3247 +- unsigned int gre_timeouts[GRE_CT_MAX];
3248 +-};
3249 +
3250 + static inline struct netns_proto_gre *gre_pernet(struct net *net)
3251 + {
3252 +@@ -408,6 +396,8 @@ static int __init nf_ct_proto_gre_init(void)
3253 + {
3254 + int ret;
3255 +
3256 ++ BUILD_BUG_ON(offsetof(struct netns_proto_gre, nf) != 0);
3257 ++
3258 + ret = register_pernet_subsys(&proto_gre_net_ops);
3259 + if (ret < 0)
3260 + goto out_pernet;
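/* Editor's aside (not part of the patch): the duplicated enum and struct
 * definitions move out of this file (the cttimeout code below now needs
 * them too), and the new BUILD_BUG_ON pins the layout -- code elsewhere
 * appears to cast a struct nf_proto_net * to a struct netns_proto_gre *,
 * which is only valid while 'nf' stays the first member. The same
 * compile-time guard in portable C11, with hypothetical types: */

#include <assert.h>
#include <stddef.h>

struct base { int refs; };

struct derived {
    struct base base;   /* must stay first for base<->derived casts */
    int extra;
};

static_assert(offsetof(struct derived, base) == 0,
              "derived must embed base as its first member");

int main(void) { return 0; }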
3261 +diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
3262 +index a30f8ba4b89a..70a7382b9787 100644
3263 +--- a/net/netfilter/nfnetlink_cttimeout.c
3264 ++++ b/net/netfilter/nfnetlink_cttimeout.c
3265 +@@ -392,7 +392,8 @@ err:
3266 + static int
3267 + cttimeout_default_fill_info(struct net *net, struct sk_buff *skb, u32 portid,
3268 + u32 seq, u32 type, int event,
3269 +- const struct nf_conntrack_l4proto *l4proto)
3270 ++ const struct nf_conntrack_l4proto *l4proto,
3271 ++ const unsigned int *timeouts)
3272 + {
3273 + struct nlmsghdr *nlh;
3274 + struct nfgenmsg *nfmsg;
3275 +@@ -421,7 +422,7 @@ cttimeout_default_fill_info(struct net *net, struct sk_buff *skb, u32 portid,
3276 + if (!nest_parms)
3277 + goto nla_put_failure;
3278 +
3279 +- ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, NULL);
3280 ++ ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, timeouts);
3281 + if (ret < 0)
3282 + goto nla_put_failure;
3283 +
3284 +@@ -444,6 +445,7 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
3285 + struct netlink_ext_ack *extack)
3286 + {
3287 + const struct nf_conntrack_l4proto *l4proto;
3288 ++ unsigned int *timeouts = NULL;
3289 + struct sk_buff *skb2;
3290 + int ret, err;
3291 + __u16 l3num;
3292 +@@ -456,12 +458,55 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
3293 + l4num = nla_get_u8(cda[CTA_TIMEOUT_L4PROTO]);
3294 + l4proto = nf_ct_l4proto_find_get(l3num, l4num);
3295 +
3296 +- /* This protocol is not supported, skip. */
3297 +- if (l4proto->l4proto != l4num) {
3298 +- err = -EOPNOTSUPP;
3299 ++ err = -EOPNOTSUPP;
3300 ++ if (l4proto->l4proto != l4num)
3301 + goto err;
3302 ++
3303 ++ switch (l4proto->l4proto) {
3304 ++ case IPPROTO_ICMP:
3305 ++ timeouts = &net->ct.nf_ct_proto.icmp.timeout;
3306 ++ break;
3307 ++ case IPPROTO_TCP:
3308 ++ timeouts = net->ct.nf_ct_proto.tcp.timeouts;
3309 ++ break;
3310 ++ case IPPROTO_UDP: /* fallthrough */
3311 ++ case IPPROTO_UDPLITE:
3312 ++ timeouts = net->ct.nf_ct_proto.udp.timeouts;
3313 ++ break;
3314 ++ case IPPROTO_DCCP:
3315 ++#ifdef CONFIG_NF_CT_PROTO_DCCP
3316 ++ timeouts = net->ct.nf_ct_proto.dccp.dccp_timeout;
3317 ++#endif
3318 ++ break;
3319 ++ case IPPROTO_ICMPV6:
3320 ++ timeouts = &net->ct.nf_ct_proto.icmpv6.timeout;
3321 ++ break;
3322 ++ case IPPROTO_SCTP:
3323 ++#ifdef CONFIG_NF_CT_PROTO_SCTP
3324 ++ timeouts = net->ct.nf_ct_proto.sctp.timeouts;
3325 ++#endif
3326 ++ break;
3327 ++ case IPPROTO_GRE:
3328 ++#ifdef CONFIG_NF_CT_PROTO_GRE
3329 ++ if (l4proto->net_id) {
3330 ++ struct netns_proto_gre *net_gre;
3331 ++
3332 ++ net_gre = net_generic(net, *l4proto->net_id);
3333 ++ timeouts = net_gre->gre_timeouts;
3334 ++ }
3335 ++#endif
3336 ++ break;
3337 ++ case 255:
3338 ++ timeouts = &net->ct.nf_ct_proto.generic.timeout;
3339 ++ break;
3340 ++ default:
3341 ++ WARN_ONCE(1, "Missing timeouts for proto %d", l4proto->l4proto);
3342 ++ break;
3343 + }
3344 +
3345 ++ if (!timeouts)
3346 ++ goto err;
3347 ++
3348 + skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
3349 + if (skb2 == NULL) {
3350 + err = -ENOMEM;
3351 +@@ -472,7 +517,7 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
3352 + nlh->nlmsg_seq,
3353 + NFNL_MSG_TYPE(nlh->nlmsg_type),
3354 + IPCTNL_MSG_TIMEOUT_DEFAULT_SET,
3355 +- l4proto);
3356 ++ l4proto, timeouts);
3357 + if (ret <= 0) {
3358 + kfree_skb(skb2);
3359 + err = -ENOMEM;
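/* Editor's aside (not part of the patch): cttimeout_default_get() used to
 * hand obj_to_nlattr() a NULL timeout pointer; the fix looks up the
 * per-netns default table for the requested protocol first and threads it
 * through cttimeout_default_fill_info(). A condensed sketch of that
 * dispatch shape, with hypothetical types: */

#include <stdio.h>

struct ns_timeouts {
    unsigned int tcp[4];
    unsigned int udp[2];
    unsigned int generic[1];
};

static const unsigned int *lookup(const struct ns_timeouts *ns, int proto)
{
    switch (proto) {
    case 6:   return ns->tcp;      /* IPPROTO_TCP */
    case 17:  return ns->udp;      /* IPPROTO_UDP */
    case 255: return ns->generic;  /* "raw"/generic */
    default:  return NULL;         /* caller must treat this as an error */
    }
}

int main(void)
{
    struct ns_timeouts ns = { .tcp = { 300, 30, 60, 120 } };
    const unsigned int *t = lookup(&ns, 6);

    printf("%u\n", t ? t[0] : 0);
    return 0;
}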
3360 +diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
3361 +index c7b6010b2c09..eab5e8eaddaa 100644
3362 +--- a/net/openvswitch/flow_netlink.c
3363 ++++ b/net/openvswitch/flow_netlink.c
3364 +@@ -2306,14 +2306,14 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
3365 +
3366 + struct sw_flow_actions *acts;
3367 + int new_acts_size;
3368 +- int req_size = NLA_ALIGN(attr_len);
3369 ++ size_t req_size = NLA_ALIGN(attr_len);
3370 + int next_offset = offsetof(struct sw_flow_actions, actions) +
3371 + (*sfa)->actions_len;
3372 +
3373 + if (req_size <= (ksize(*sfa) - next_offset))
3374 + goto out;
3375 +
3376 +- new_acts_size = ksize(*sfa) * 2;
3377 ++ new_acts_size = max(next_offset + req_size, ksize(*sfa) * 2);
3378 +
3379 + if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
3380 + if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
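/* Editor's aside (not part of the patch): two fixes in one hunk --
 * req_size becomes size_t so the "does it fit" comparison cannot go
 * negative, and the new size is max(next_offset + req_size, 2 * ksize())
 * so a single request larger than double the current buffer still fits.
 * The growth policy as a runnable userspace sketch: */

#include <stdio.h>
#include <stdlib.h>

struct dynbuf {
    char *data;
    size_t len;   /* bytes in use */
    size_t cap;   /* bytes allocated */
};

static int dynbuf_reserve(struct dynbuf *b, size_t req)
{
    if (req <= b->cap - b->len)
        return 0;

    /* grow to the larger of "what this request needs" and 2x */
    size_t need = b->len + req;
    size_t cap = b->cap * 2 > need ? b->cap * 2 : need;
    char *p = realloc(b->data, cap);

    if (!p)
        return -1;
    b->data = p;
    b->cap = cap;
    return 0;
}

int main(void)
{
    struct dynbuf b = { .data = malloc(8), .len = 0, .cap = 8 };

    if (!b.data || dynbuf_reserve(&b, 100) < 0) /* > 2x current cap */
        return 1;
    printf("cap=%zu\n", b.cap);
    free(b.data);
    return 0;
}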
3381 +diff --git a/net/rds/tcp.c b/net/rds/tcp.c
3382 +index b9bbcf3d6c63..18bb522df282 100644
3383 +--- a/net/rds/tcp.c
3384 ++++ b/net/rds/tcp.c
3385 +@@ -600,7 +600,7 @@ static void rds_tcp_kill_sock(struct net *net)
3386 + list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
3387 + struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
3388 +
3389 +- if (net != c_net || !tc->t_sock)
3390 ++ if (net != c_net)
3391 + continue;
3392 + if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) {
3393 + list_move_tail(&tc->t_tcp_node, &tmp_list);
3394 +diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
3395 +index 6b67aa13d2dd..c7f5d630d97c 100644
3396 +--- a/net/sched/act_sample.c
3397 ++++ b/net/sched/act_sample.c
3398 +@@ -43,8 +43,8 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
3399 + struct tc_action_net *tn = net_generic(net, sample_net_id);
3400 + struct nlattr *tb[TCA_SAMPLE_MAX + 1];
3401 + struct psample_group *psample_group;
3402 ++ u32 psample_group_num, rate;
3403 + struct tc_sample *parm;
3404 +- u32 psample_group_num;
3405 + struct tcf_sample *s;
3406 + bool exists = false;
3407 + int ret, err;
3408 +@@ -80,6 +80,12 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
3409 + return -EEXIST;
3410 + }
3411 +
3412 ++ rate = nla_get_u32(tb[TCA_SAMPLE_RATE]);
3413 ++ if (!rate) {
3414 ++ NL_SET_ERR_MSG(extack, "invalid sample rate");
3415 ++ tcf_idr_release(*a, bind);
3416 ++ return -EINVAL;
3417 ++ }
3418 + psample_group_num = nla_get_u32(tb[TCA_SAMPLE_PSAMPLE_GROUP]);
3419 + psample_group = psample_group_get(net, psample_group_num);
3420 + if (!psample_group) {
3421 +@@ -91,7 +97,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
3422 +
3423 + spin_lock_bh(&s->tcf_lock);
3424 + s->tcf_action = parm->action;
3425 +- s->rate = nla_get_u32(tb[TCA_SAMPLE_RATE]);
3426 ++ s->rate = rate;
3427 + s->psample_group_num = psample_group_num;
3428 + RCU_INIT_POINTER(s->psample_group, psample_group);
3429 +
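/* Editor's aside (not part of the patch): the sample rate is used as a
 * 1-in-N divisor on the datapath, so rate == 0 has to be rejected up
 * front. Note the fix also validates before touching the action's
 * committed state -- the usual "validate everything, then commit"
 * ordering for netlink setters. */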
3430 +diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
3431 +index 856fa79d4ffd..621bc1d5b057 100644
3432 +--- a/net/sched/cls_matchall.c
3433 ++++ b/net/sched/cls_matchall.c
3434 +@@ -126,6 +126,11 @@ static void mall_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
3435 +
3436 + static void *mall_get(struct tcf_proto *tp, u32 handle)
3437 + {
3438 ++ struct cls_mall_head *head = rtnl_dereference(tp->root);
3439 ++
3440 ++ if (head && head->handle == handle)
3441 ++ return head;
3442 ++
3443 + return NULL;
3444 + }
3445 +
3446 +diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
3447 +index 1c9f079e8a50..d97b2b4b7a8b 100644
3448 +--- a/net/sctp/protocol.c
3449 ++++ b/net/sctp/protocol.c
3450 +@@ -600,6 +600,7 @@ out:
3451 + static int sctp_v4_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr)
3452 + {
3453 + /* No address mapping for V4 sockets */
3454 ++ memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
3455 + return sizeof(struct sockaddr_in);
3456 + }
3457 +
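/* Editor's aside (not part of the patch): sctp_v4_addr_to_user() tells
 * its caller that sizeof(struct sockaddr_in) bytes are valid, so the
 * sin_zero padding must be zeroed or stale kernel memory can leak to
 * userspace. The same rule applies to any struct copied out whole; a
 * userspace-shaped illustration: */

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <netinet/in.h>

int main(void)
{
    struct sockaddr_in sin;

    /* Fill every field that is about to cross a trust boundary... */
    sin.sin_family = AF_INET;
    sin.sin_port = htons(9);
    sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
    /* ...including padding the ABI says exists but nobody "uses". */
    memset(sin.sin_zero, 0, sizeof(sin.sin_zero));

    printf("ok, %zu bytes fully initialized\n", sizeof(sin));
    return 0;
}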
3458 +diff --git a/scripts/package/builddeb b/scripts/package/builddeb
3459 +index 90c9a8ac7adb..0b31f4f1f92c 100755
3460 +--- a/scripts/package/builddeb
3461 ++++ b/scripts/package/builddeb
3462 +@@ -81,7 +81,7 @@ else
3463 + cp System.map "$tmpdir/boot/System.map-$version"
3464 + cp $KCONFIG_CONFIG "$tmpdir/boot/config-$version"
3465 + fi
3466 +-cp "$($MAKE -s image_name)" "$tmpdir/$installed_image_path"
3467 ++cp "$($MAKE -s -f $srctree/Makefile image_name)" "$tmpdir/$installed_image_path"
3468 +
3469 + if grep -q "^CONFIG_OF=y" $KCONFIG_CONFIG ; then
3470 + # Only some architectures with OF support have this target
3471 +diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
3472 +index 92e6524a3a9d..b55cb96d1fed 100644
3473 +--- a/sound/core/seq/seq_clientmgr.c
3474 ++++ b/sound/core/seq/seq_clientmgr.c
3475 +@@ -1252,7 +1252,7 @@ static int snd_seq_ioctl_set_client_info(struct snd_seq_client *client,
3476 +
3477 + /* fill the info fields */
3478 + if (client_info->name[0])
3479 +- strlcpy(client->name, client_info->name, sizeof(client->name));
3480 ++ strscpy(client->name, client_info->name, sizeof(client->name));
3481 +
3482 + client->filter = client_info->filter;
3483 + client->event_lost = client_info->event_lost;
3484 +@@ -1530,7 +1530,7 @@ static int snd_seq_ioctl_create_queue(struct snd_seq_client *client, void *arg)
3485 + /* set queue name */
3486 + if (!info->name[0])
3487 + snprintf(info->name, sizeof(info->name), "Queue-%d", q->queue);
3488 +- strlcpy(q->name, info->name, sizeof(q->name));
3489 ++ strscpy(q->name, info->name, sizeof(q->name));
3490 + snd_use_lock_free(&q->use_lock);
3491 +
3492 + return 0;
3493 +@@ -1592,7 +1592,7 @@ static int snd_seq_ioctl_set_queue_info(struct snd_seq_client *client,
3494 + queuefree(q);
3495 + return -EPERM;
3496 + }
3497 +- strlcpy(q->name, info->name, sizeof(q->name));
3498 ++ strscpy(q->name, info->name, sizeof(q->name));
3499 + queuefree(q);
3500 +
3501 + return 0;
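/* Editor's aside (not part of the patch): strlcpy() must return
 * strlen(src), so it walks the whole source string even past the
 * destination size -- a problem if the source is untrusted or may lack a
 * terminator. strscpy() reads at most the bytes it copies, always
 * NUL-terminates, and signals truncation with -E2BIG. A userspace
 * approximation of that contract (my_strscpy is a hypothetical helper): */

#include <errno.h>
#include <stdio.h>

static long my_strscpy(char *dst, const char *src, size_t size)
{
    size_t i;

    if (size == 0)
        return -E2BIG;
    for (i = 0; i < size - 1 && src[i]; i++)
        dst[i] = src[i];
    dst[i] = '\0';
    return src[i] ? -E2BIG : (long)i; /* -E2BIG means truncated */
}

int main(void)
{
    char buf[8];
    long n = my_strscpy(buf, "Queue-1", sizeof(buf));

    printf("copied=%ld buf=%s\n", n, buf);
    return 0;
}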
3502 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
3503 +index 7572b8cc7127..9bc8a7cb40ea 100644
3504 +--- a/sound/pci/hda/hda_intel.c
3505 ++++ b/sound/pci/hda/hda_intel.c
3506 +@@ -2272,6 +2272,8 @@ static struct snd_pci_quirk power_save_blacklist[] = {
3507 + SND_PCI_QUIRK(0x8086, 0x2040, "Intel DZ77BH-55K", 0),
3508 + /* https://bugzilla.kernel.org/show_bug.cgi?id=199607 */
3509 + SND_PCI_QUIRK(0x8086, 0x2057, "Intel NUC5i7RYB", 0),
3510 ++ /* https://bugs.launchpad.net/bugs/1821663 */
3511 ++ SND_PCI_QUIRK(0x8086, 0x2064, "Intel SDP 8086:2064", 0),
3512 + /* https://bugzilla.redhat.com/show_bug.cgi?id=1520902 */
3513 + SND_PCI_QUIRK(0x8086, 0x2068, "Intel NUC7i3BNB", 0),
3514 + /* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */
3515 +@@ -2280,6 +2282,8 @@ static struct snd_pci_quirk power_save_blacklist[] = {
3516 + SND_PCI_QUIRK(0x17aa, 0x367b, "Lenovo IdeaCentre B550", 0),
3517 + /* https://bugzilla.redhat.com/show_bug.cgi?id=1572975 */
3518 + SND_PCI_QUIRK(0x17aa, 0x36a7, "Lenovo C50 All in one", 0),
3519 ++ /* https://bugs.launchpad.net/bugs/1821663 */
3520 ++ SND_PCI_QUIRK(0x1631, 0xe017, "Packard Bell NEC IMEDIA 5204", 0),
3521 + {}
3522 + };
3523 + #endif /* CONFIG_PM */
3524 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
3525 +index 4c6321ec844d..b9d832bde23e 100644
3526 +--- a/sound/pci/hda/patch_realtek.c
3527 ++++ b/sound/pci/hda/patch_realtek.c
3528 +@@ -1864,8 +1864,8 @@ enum {
3529 + ALC887_FIXUP_BASS_CHMAP,
3530 + ALC1220_FIXUP_GB_DUAL_CODECS,
3531 + ALC1220_FIXUP_CLEVO_P950,
3532 +- ALC1220_FIXUP_SYSTEM76_ORYP5,
3533 +- ALC1220_FIXUP_SYSTEM76_ORYP5_PINS,
3534 ++ ALC1220_FIXUP_CLEVO_PB51ED,
3535 ++ ALC1220_FIXUP_CLEVO_PB51ED_PINS,
3536 + };
3537 +
3538 + static void alc889_fixup_coef(struct hda_codec *codec,
3539 +@@ -2070,7 +2070,7 @@ static void alc1220_fixup_clevo_p950(struct hda_codec *codec,
3540 + static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec,
3541 + const struct hda_fixup *fix, int action);
3542 +
3543 +-static void alc1220_fixup_system76_oryp5(struct hda_codec *codec,
3544 ++static void alc1220_fixup_clevo_pb51ed(struct hda_codec *codec,
3545 + const struct hda_fixup *fix,
3546 + int action)
3547 + {
3548 +@@ -2322,18 +2322,18 @@ static const struct hda_fixup alc882_fixups[] = {
3549 + .type = HDA_FIXUP_FUNC,
3550 + .v.func = alc1220_fixup_clevo_p950,
3551 + },
3552 +- [ALC1220_FIXUP_SYSTEM76_ORYP5] = {
3553 ++ [ALC1220_FIXUP_CLEVO_PB51ED] = {
3554 + .type = HDA_FIXUP_FUNC,
3555 +- .v.func = alc1220_fixup_system76_oryp5,
3556 ++ .v.func = alc1220_fixup_clevo_pb51ed,
3557 + },
3558 +- [ALC1220_FIXUP_SYSTEM76_ORYP5_PINS] = {
3559 ++ [ALC1220_FIXUP_CLEVO_PB51ED_PINS] = {
3560 + .type = HDA_FIXUP_PINS,
3561 + .v.pins = (const struct hda_pintbl[]) {
3562 + { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */
3563 + {}
3564 + },
3565 + .chained = true,
3566 +- .chain_id = ALC1220_FIXUP_SYSTEM76_ORYP5,
3567 ++ .chain_id = ALC1220_FIXUP_CLEVO_PB51ED,
3568 + },
3569 + };
3570 +
3571 +@@ -2411,8 +2411,9 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
3572 + SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
3573 + SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950),
3574 + SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950),
3575 +- SND_PCI_QUIRK(0x1558, 0x96e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS),
3576 +- SND_PCI_QUIRK(0x1558, 0x97e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS),
3577 ++ SND_PCI_QUIRK(0x1558, 0x96e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
3578 ++ SND_PCI_QUIRK(0x1558, 0x97e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
3579 ++ SND_PCI_QUIRK(0x1558, 0x65d1, "Tuxedo Book XC1509", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
3580 + SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
3581 + SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
3582 + SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530),
3583 +@@ -5594,6 +5595,7 @@ enum {
3584 + ALC233_FIXUP_ASUS_MIC_NO_PRESENCE,
3585 + ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE,
3586 + ALC233_FIXUP_LENOVO_MULTI_CODECS,
3587 ++ ALC233_FIXUP_ACER_HEADSET_MIC,
3588 + ALC294_FIXUP_LENOVO_MIC_LOCATION,
3589 + ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE,
3590 + ALC700_FIXUP_INTEL_REFERENCE,
3591 +@@ -6401,6 +6403,16 @@ static const struct hda_fixup alc269_fixups[] = {
3592 + .type = HDA_FIXUP_FUNC,
3593 + .v.func = alc233_alc662_fixup_lenovo_dual_codecs,
3594 + },
3595 ++ [ALC233_FIXUP_ACER_HEADSET_MIC] = {
3596 ++ .type = HDA_FIXUP_VERBS,
3597 ++ .v.verbs = (const struct hda_verb[]) {
3598 ++ { 0x20, AC_VERB_SET_COEF_INDEX, 0x45 },
3599 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0x5089 },
3600 ++ { }
3601 ++ },
3602 ++ .chained = true,
3603 ++ .chain_id = ALC233_FIXUP_ASUS_MIC_NO_PRESENCE
3604 ++ },
3605 + [ALC294_FIXUP_LENOVO_MIC_LOCATION] = {
3606 + .type = HDA_FIXUP_PINS,
3607 + .v.pins = (const struct hda_pintbl[]) {
3608 +@@ -6644,6 +6656,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
3609 + SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
3610 + SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
3611 + SND_PCI_QUIRK(0x1025, 0x1308, "Acer Aspire Z24-890", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
3612 ++ SND_PCI_QUIRK(0x1025, 0x132a, "Acer TravelMate B114-21", ALC233_FIXUP_ACER_HEADSET_MIC),
3613 + SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC),
3614 + SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
3615 + SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
3616 +diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
3617 +index 4daefa5b150a..38fd32ab443c 100644
3618 +--- a/sound/soc/fsl/fsl_esai.c
3619 ++++ b/sound/soc/fsl/fsl_esai.c
3620 +@@ -54,6 +54,8 @@ struct fsl_esai {
3621 + u32 fifo_depth;
3622 + u32 slot_width;
3623 + u32 slots;
3624 ++ u32 tx_mask;
3625 ++ u32 rx_mask;
3626 + u32 hck_rate[2];
3627 + u32 sck_rate[2];
3628 + bool hck_dir[2];
3629 +@@ -361,21 +363,13 @@ static int fsl_esai_set_dai_tdm_slot(struct snd_soc_dai *dai, u32 tx_mask,
3630 + regmap_update_bits(esai_priv->regmap, REG_ESAI_TCCR,
3631 + ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(slots));
3632 +
3633 +- regmap_update_bits(esai_priv->regmap, REG_ESAI_TSMA,
3634 +- ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(tx_mask));
3635 +- regmap_update_bits(esai_priv->regmap, REG_ESAI_TSMB,
3636 +- ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(tx_mask));
3637 +-
3638 + regmap_update_bits(esai_priv->regmap, REG_ESAI_RCCR,
3639 + ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(slots));
3640 +
3641 +- regmap_update_bits(esai_priv->regmap, REG_ESAI_RSMA,
3642 +- ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(rx_mask));
3643 +- regmap_update_bits(esai_priv->regmap, REG_ESAI_RSMB,
3644 +- ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(rx_mask));
3645 +-
3646 + esai_priv->slot_width = slot_width;
3647 + esai_priv->slots = slots;
3648 ++ esai_priv->tx_mask = tx_mask;
3649 ++ esai_priv->rx_mask = rx_mask;
3650 +
3651 + return 0;
3652 + }
3653 +@@ -596,6 +590,7 @@ static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd,
3654 + bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
3655 + u8 i, channels = substream->runtime->channels;
3656 + u32 pins = DIV_ROUND_UP(channels, esai_priv->slots);
3657 ++ u32 mask;
3658 +
3659 + switch (cmd) {
3660 + case SNDRV_PCM_TRIGGER_START:
3661 +@@ -608,15 +603,38 @@ static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd,
3662 + for (i = 0; tx && i < channels; i++)
3663 + regmap_write(esai_priv->regmap, REG_ESAI_ETDR, 0x0);
3664 +
3665 ++ /*
3666 ++ * When set the TE/RE in the end of enablement flow, there
3667 ++ * will be channel swap issue for multi data line case.
3668 ++ * In order to workaround this issue, we switch the bit
3669 ++ * enablement sequence to below sequence
3670 ++ * 1) clear the xSMB & xSMA: which is done in probe and
3671 ++ * stop state.
3672 ++ * 2) set TE/RE
3673 ++ * 3) set xSMB
3674 ++ * 4) set xSMA: xSMA is the last one in this flow, which
3675 ++ * will trigger esai to start.
3676 ++ */
3677 + regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx),
3678 + tx ? ESAI_xCR_TE_MASK : ESAI_xCR_RE_MASK,
3679 + tx ? ESAI_xCR_TE(pins) : ESAI_xCR_RE(pins));
3680 ++ mask = tx ? esai_priv->tx_mask : esai_priv->rx_mask;
3681 ++
3682 ++ regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMB(tx),
3683 ++ ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(mask));
3684 ++ regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMA(tx),
3685 ++ ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(mask));
3686 ++
3687 + break;
3688 + case SNDRV_PCM_TRIGGER_SUSPEND:
3689 + case SNDRV_PCM_TRIGGER_STOP:
3690 + case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
3691 + regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx),
3692 + tx ? ESAI_xCR_TE_MASK : ESAI_xCR_RE_MASK, 0);
3693 ++ regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMA(tx),
3694 ++ ESAI_xSMA_xS_MASK, 0);
3695 ++ regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMB(tx),
3696 ++ ESAI_xSMB_xS_MASK, 0);
3697 +
3698 + /* Disable and reset FIFO */
3699 + regmap_update_bits(esai_priv->regmap, REG_ESAI_xFCR(tx),
3700 +@@ -906,6 +924,15 @@ static int fsl_esai_probe(struct platform_device *pdev)
3701 + return ret;
3702 + }
3703 +
3704 ++ esai_priv->tx_mask = 0xFFFFFFFF;
3705 ++ esai_priv->rx_mask = 0xFFFFFFFF;
3706 ++
3707 ++ /* Clear the TSMA, TSMB, RSMA, RSMB */
3708 ++ regmap_write(esai_priv->regmap, REG_ESAI_TSMA, 0);
3709 ++ regmap_write(esai_priv->regmap, REG_ESAI_TSMB, 0);
3710 ++ regmap_write(esai_priv->regmap, REG_ESAI_RSMA, 0);
3711 ++ regmap_write(esai_priv->regmap, REG_ESAI_RSMB, 0);
3712 ++
3713 + ret = devm_snd_soc_register_component(&pdev->dev, &fsl_esai_component,
3714 + &fsl_esai_dai, 1);
3715 + if (ret) {
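/* Editor's aside (not part of the patch): the ESAI fix reorders the
 * enable sequence to avoid a channel swap on multi-data-line setups --
 * slot masks stay clear while stopped (cleared in probe and on stop),
 * TE/RE is set first, then xSMB, and xSMA last, the write that actually
 * starts the interface; the tdm_slot masks are cached in tx_mask/rx_mask
 * and applied only at trigger-start. A stubbed sketch of the ordering
 * (the 32-bit slot mask is assumed to split across the A/B registers): */

#include <stdio.h>

/* stand-in for regmap_update_bits(); just logs the ordered writes */
static void reg_write(const char *reg, unsigned int val)
{
    printf("write %-4s = 0x%08x\n", reg, val);
}

static void esai_start(int tx, unsigned int pins, unsigned int mask)
{
    reg_write(tx ? "TCR"  : "RCR",  pins);           /* 1) enable TE/RE  */
    reg_write(tx ? "TSMB" : "RSMB", mask >> 16);     /* 2) high slot half */
    reg_write(tx ? "TSMA" : "RSMA", mask & 0xffff);  /* 3) starts ESAI    */
}

int main(void)
{
    esai_start(1, 0x1, 0xffffffff);
    return 0;
}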
3716 +diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
3717 +index e662400873ec..6868e71e3a3f 100644
3718 +--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
3719 ++++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
3720 +@@ -711,9 +711,17 @@ static int sst_soc_probe(struct snd_soc_component *component)
3721 + return sst_dsp_init_v2_dpcm(component);
3722 + }
3723 +
3724 ++static void sst_soc_remove(struct snd_soc_component *component)
3725 ++{
3726 ++ struct sst_data *drv = dev_get_drvdata(component->dev);
3727 ++
3728 ++ drv->soc_card = NULL;
3729 ++}
3730 ++
3731 + static const struct snd_soc_component_driver sst_soc_platform_drv = {
3732 + .name = DRV_NAME,
3733 + .probe = sst_soc_probe,
3734 ++ .remove = sst_soc_remove,
3735 + .ops = &sst_platform_ops,
3736 + .compr_ops = &sst_platform_compr_ops,
3737 + .pcm_new = sst_pcm_new,
3738 +diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/sample.json b/tools/testing/selftests/tc-testing/tc-tests/actions/sample.json
3739 +index 3aca33c00039..618def9bdf0e 100644
3740 +--- a/tools/testing/selftests/tc-testing/tc-tests/actions/sample.json
3741 ++++ b/tools/testing/selftests/tc-testing/tc-tests/actions/sample.json
3742 +@@ -143,6 +143,30 @@
3743 + "$TC actions flush action sample"
3744 + ]
3745 + },
3746 ++ {
3747 ++ "id": "7571",
3748 ++ "name": "Add sample action with invalid rate",
3749 ++ "category": [
3750 ++ "actions",
3751 ++ "sample"
3752 ++ ],
3753 ++ "setup": [
3754 ++ [
3755 ++ "$TC actions flush action sample",
3756 ++ 0,
3757 ++ 1,
3758 ++ 255
3759 ++ ]
3760 ++ ],
3761 ++ "cmdUnderTest": "$TC actions add action sample rate 0 group 1 index 2",
3762 ++ "expExitCode": "255",
3763 ++ "verifyCmd": "$TC actions get action sample index 2",
3764 ++ "matchPattern": "action order [0-9]+: sample rate 1/0 group 1.*index 2 ref",
3765 ++ "matchCount": "0",
3766 ++ "teardown": [
3767 ++ "$TC actions flush action sample"
3768 ++ ]
3769 ++ },
3770 + {
3771 + "id": "b6d4",
3772 + "name": "Add sample action with mandatory arguments and invalid control action",