From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Sat, 20 Apr 2019 23:28:00
Message-Id: 1555703570.7a397352aed0d1e1fdec8f3eb9b2c26f8d4620ff.mpagano@gentoo
commit:     7a397352aed0d1e1fdec8f3eb9b2c26f8d4620ff
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Apr 19 19:52:50 2019 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Apr 19 19:52:50 2019 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=7a397352

Linux patch 4.14.112

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README               |    4 +
 1111_linux-4.14.112.patch | 2505 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2509 insertions(+)

diff --git a/0000_README b/0000_README
index f77da1f..ea26cf5 100644
--- a/0000_README
+++ b/0000_README
@@ -487,6 +487,10 @@ Patch: 1110_4.14.111.patch
From: http://www.kernel.org
Desc: Linux 4.14.111

+Patch: 1111_4.14.112.patch
+From: http://www.kernel.org
+Desc: Linux 4.14.112
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1111_linux-4.14.112.patch b/1111_linux-4.14.112.patch
new file mode 100644
index 0000000..1ad2301
--- /dev/null
+++ b/1111_linux-4.14.112.patch
@@ -0,0 +1,2505 @@
+diff --git a/Makefile b/Makefile
+index da223c660c9a..94673d2a6a27 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 111
++SUBLEVEL = 112
+ EXTRAVERSION =
+ NAME = Petit Gorille
+
+@@ -480,7 +480,7 @@ endif
+ ifeq ($(cc-name),clang)
+ ifneq ($(CROSS_COMPILE),)
+ CLANG_FLAGS := --target=$(notdir $(CROSS_COMPILE:%-=%))
+-GCC_TOOLCHAIN_DIR := $(dir $(shell which $(LD)))
++GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit))
+ CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)
+ GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..)
+ endif
+diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts
+index ddd897556e03..478434ebff92 100644
+--- a/arch/arm/boot/dts/am335x-evm.dts
++++ b/arch/arm/boot/dts/am335x-evm.dts
+@@ -57,6 +57,24 @@
+ enable-active-high;
+ };
+
++ /* TPS79501 */
++ v1_8d_reg: fixedregulator-v1_8d {
++ compatible = "regulator-fixed";
++ regulator-name = "v1_8d";
++ vin-supply = <&vbat>;
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <1800000>;
++ };
++
++ /* TPS79501 */
++ v3_3d_reg: fixedregulator-v3_3d {
++ compatible = "regulator-fixed";
++ regulator-name = "v3_3d";
++ vin-supply = <&vbat>;
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ };
++
+ matrix_keypad: matrix_keypad0 {
+ compatible = "gpio-matrix-keypad";
+ debounce-delay-ms = <5>;
+@@ -492,10 +510,10 @@
+ status = "okay";
+
+ /* Regulators */
+- AVDD-supply = <&vaux2_reg>;
+- IOVDD-supply = <&vaux2_reg>;
+- DRVDD-supply = <&vaux2_reg>;
+- DVDD-supply = <&vbat>;
++ AVDD-supply = <&v3_3d_reg>;
++ IOVDD-supply = <&v3_3d_reg>;
++ DRVDD-supply = <&v3_3d_reg>;
++ DVDD-supply = <&v1_8d_reg>;
+ };
+ };
+
+diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
+index 9ba4b18c0cb2..bbd828892fcb 100644
+--- a/arch/arm/boot/dts/am335x-evmsk.dts
++++ b/arch/arm/boot/dts/am335x-evmsk.dts
+@@ -73,6 +73,24 @@
+ enable-active-high;
+ };
+
++ /* TPS79518 */
++ v1_8d_reg: fixedregulator-v1_8d {
++ compatible = "regulator-fixed";
++ regulator-name = "v1_8d";
++ vin-supply = <&vbat>;
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <1800000>;
++ };
++
++ /* TPS78633 */
++ v3_3d_reg: fixedregulator-v3_3d {
++ compatible = "regulator-fixed";
++ regulator-name = "v3_3d";
++ vin-supply = <&vbat>;
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ };
++
+ leds {
+ pinctrl-names = "default";
+ pinctrl-0 = <&user_leds_s0>;
+@@ -493,10 +511,10 @@
+ status = "okay";
+
+ /* Regulators */
+- AVDD-supply = <&vaux2_reg>;
+- IOVDD-supply = <&vaux2_reg>;
+- DRVDD-supply = <&vaux2_reg>;
+- DVDD-supply = <&vbat>;
++ AVDD-supply = <&v3_3d_reg>;
++ IOVDD-supply = <&v3_3d_reg>;
++ DRVDD-supply = <&v3_3d_reg>;
++ DVDD-supply = <&v1_8d_reg>;
+ };
+ };
+
+diff --git a/arch/arm/boot/dts/sama5d2-pinfunc.h b/arch/arm/boot/dts/sama5d2-pinfunc.h
+index e57191fb83de..9daa6dfd71e0 100644
+--- a/arch/arm/boot/dts/sama5d2-pinfunc.h
++++ b/arch/arm/boot/dts/sama5d2-pinfunc.h
+@@ -518,7 +518,7 @@
+ #define PIN_PC9__GPIO PINMUX_PIN(PIN_PC9, 0, 0)
+ #define PIN_PC9__FIQ PINMUX_PIN(PIN_PC9, 1, 3)
+ #define PIN_PC9__GTSUCOMP PINMUX_PIN(PIN_PC9, 2, 1)
+-#define PIN_PC9__ISC_D0 PINMUX_PIN(PIN_PC9, 2, 1)
++#define PIN_PC9__ISC_D0 PINMUX_PIN(PIN_PC9, 3, 1)
+ #define PIN_PC9__TIOA4 PINMUX_PIN(PIN_PC9, 4, 2)
+ #define PIN_PC10 74
+ #define PIN_PC10__GPIO PINMUX_PIN(PIN_PC10, 0, 0)
+diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
+index 28257724a56e..e720f40bbd5d 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
+@@ -82,8 +82,7 @@
+
+ vcc_host1_5v: vcc_otg_5v: vcc-host1-5v-regulator {
+ compatible = "regulator-fixed";
+- enable-active-high;
+- gpio = <&gpio0 RK_PD3 GPIO_ACTIVE_HIGH>;
++ gpio = <&gpio0 RK_PA2 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb20_host_drv>;
+ regulator-name = "vcc_host1_5v";
+@@ -275,7 +274,7 @@
+
+ usb2 {
+ usb20_host_drv: usb20-host-drv {
+- rockchip,pins = <0 RK_PD3 RK_FUNC_GPIO &pcfg_pull_none>;
++ rockchip,pins = <0 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+index efac2202b16e..f6b4b8f0260f 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+@@ -1333,11 +1333,11 @@
+
+ sdmmc0 {
+ sdmmc0_clk: sdmmc0-clk {
+- rockchip,pins = <1 RK_PA6 1 &pcfg_pull_none_4ma>;
++ rockchip,pins = <1 RK_PA6 1 &pcfg_pull_none_8ma>;
+ };
+
+ sdmmc0_cmd: sdmmc0-cmd {
+- rockchip,pins = <1 RK_PA4 1 &pcfg_pull_up_4ma>;
++ rockchip,pins = <1 RK_PA4 1 &pcfg_pull_up_8ma>;
+ };
+
+ sdmmc0_dectn: sdmmc0-dectn {
+@@ -1349,14 +1349,14 @@
+ };
+
+ sdmmc0_bus1: sdmmc0-bus1 {
+- rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_4ma>;
++ rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_8ma>;
+ };
+
+ sdmmc0_bus4: sdmmc0-bus4 {
+- rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_4ma>,
+- <1 RK_PA1 1 &pcfg_pull_up_4ma>,
+- <1 RK_PA2 1 &pcfg_pull_up_4ma>,
+- <1 RK_PA3 1 &pcfg_pull_up_4ma>;
++ rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_8ma>,
++ <1 RK_PA1 1 &pcfg_pull_up_8ma>,
++ <1 RK_PA2 1 &pcfg_pull_up_8ma>,
++ <1 RK_PA3 1 &pcfg_pull_up_8ma>;
+ };
+
+ sdmmc0_gpio: sdmmc0-gpio {
+@@ -1530,50 +1530,50 @@
+ rgmiim1_pins: rgmiim1-pins {
+ rockchip,pins =
+ /* mac_txclk */
+- <1 RK_PB4 2 &pcfg_pull_none_12ma>,
++ <1 RK_PB4 2 &pcfg_pull_none_8ma>,
+ /* mac_rxclk */
+- <1 RK_PB5 2 &pcfg_pull_none_2ma>,
++ <1 RK_PB5 2 &pcfg_pull_none_4ma>,
+ /* mac_mdio */
+- <1 RK_PC3 2 &pcfg_pull_none_2ma>,
++ <1 RK_PC3 2 &pcfg_pull_none_4ma>,
+ /* mac_txen */
+- <1 RK_PD1 2 &pcfg_pull_none_12ma>,
++ <1 RK_PD1 2 &pcfg_pull_none_8ma>,
+ /* mac_clk */
+- <1 RK_PC5 2 &pcfg_pull_none_2ma>,
++ <1 RK_PC5 2 &pcfg_pull_none_4ma>,
+ /* mac_rxdv */
+- <1 RK_PC6 2 &pcfg_pull_none_2ma>,
++ <1 RK_PC6 2 &pcfg_pull_none_4ma>,
+ /* mac_mdc */
+- <1 RK_PC7 2 &pcfg_pull_none_2ma>,
++ <1 RK_PC7 2 &pcfg_pull_none_4ma>,
+ /* mac_rxd1 */
+- <1 RK_PB2 2 &pcfg_pull_none_2ma>,
++ <1 RK_PB2 2 &pcfg_pull_none_4ma>,
+ /* mac_rxd0 */
+- <1 RK_PB3 2 &pcfg_pull_none_2ma>,
++ <1 RK_PB3 2 &pcfg_pull_none_4ma>,
+ /* mac_txd1 */
+- <1 RK_PB0 2 &pcfg_pull_none_12ma>,
++ <1 RK_PB0 2 &pcfg_pull_none_8ma>,
+ /* mac_txd0 */
+- <1 RK_PB1 2 &pcfg_pull_none_12ma>,
++ <1 RK_PB1 2 &pcfg_pull_none_8ma>,
+ /* mac_rxd3 */
+- <1 RK_PB6 2 &pcfg_pull_none_2ma>,
++ <1 RK_PB6 2 &pcfg_pull_none_4ma>,
+ /* mac_rxd2 */
+- <1 RK_PB7 2 &pcfg_pull_none_2ma>,
++ <1 RK_PB7 2 &pcfg_pull_none_4ma>,
+ /* mac_txd3 */
+- <1 RK_PC0 2 &pcfg_pull_none_12ma>,
++ <1 RK_PC0 2 &pcfg_pull_none_8ma>,
+ /* mac_txd2 */
+- <1 RK_PC1 2 &pcfg_pull_none_12ma>,
++ <1 RK_PC1 2 &pcfg_pull_none_8ma>,
+
+ /* mac_txclk */
+- <0 RK_PB0 1 &pcfg_pull_none>,
++ <0 RK_PB0 1 &pcfg_pull_none_8ma>,
+ /* mac_txen */
+- <0 RK_PB4 1 &pcfg_pull_none>,
++ <0 RK_PB4 1 &pcfg_pull_none_8ma>,
+ /* mac_clk */
+- <0 RK_PD0 1 &pcfg_pull_none>,
++ <0 RK_PD0 1 &pcfg_pull_none_4ma>,
+ /* mac_txd1 */
+- <0 RK_PC0 1 &pcfg_pull_none>,
++ <0 RK_PC0 1 &pcfg_pull_none_8ma>,
+ /* mac_txd0 */
+- <0 RK_PC1 1 &pcfg_pull_none>,
++ <0 RK_PC1 1 &pcfg_pull_none_8ma>,
+ /* mac_txd3 */
+- <0 RK_PC7 1 &pcfg_pull_none>,
++ <0 RK_PC7 1 &pcfg_pull_none_8ma>,
+ /* mac_txd2 */
+- <0 RK_PC6 1 &pcfg_pull_none>;
++ <0 RK_PC6 1 &pcfg_pull_none_8ma>;
+ };
+
+ rmiim1_pins: rmiim1-pins {
+diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
+index 07fe2479d310..b447b4db423a 100644
+--- a/arch/arm64/include/asm/futex.h
++++ b/arch/arm64/include/asm/futex.h
+@@ -30,8 +30,8 @@ do { \
+ " prfm pstl1strm, %2\n" \
+ "1: ldxr %w1, %2\n" \
+ insn "\n" \
+-"2: stlxr %w3, %w0, %2\n" \
+-" cbnz %w3, 1b\n" \
++"2: stlxr %w0, %w3, %2\n" \
++" cbnz %w0, 1b\n" \
+ " dmb ish\n" \
+ "3:\n" \
+ " .pushsection .fixup,\"ax\"\n" \
+@@ -50,30 +50,30 @@ do { \
+ static inline int
+ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr)
+ {
+- int oldval = 0, ret, tmp;
++ int oldval, ret, tmp;
+ u32 __user *uaddr = __uaccess_mask_ptr(_uaddr);
+
+ pagefault_disable();
+
+ switch (op) {
+ case FUTEX_OP_SET:
+- __futex_atomic_op("mov %w0, %w4",
++ __futex_atomic_op("mov %w3, %w4",
+ ret, oldval, uaddr, tmp, oparg);
+ break;
+ case FUTEX_OP_ADD:
+- __futex_atomic_op("add %w0, %w1, %w4",
++ __futex_atomic_op("add %w3, %w1, %w4",
+ ret, oldval, uaddr, tmp, oparg);
+ break;
+ case FUTEX_OP_OR:
+- __futex_atomic_op("orr %w0, %w1, %w4",
++ __futex_atomic_op("orr %w3, %w1, %w4",
+ ret, oldval, uaddr, tmp, oparg);
+ break;
+ case FUTEX_OP_ANDN:
+- __futex_atomic_op("and %w0, %w1, %w4",
++ __futex_atomic_op("and %w3, %w1, %w4",
+ ret, oldval, uaddr, tmp, ~oparg);
+ break;
+ case FUTEX_OP_XOR:
+- __futex_atomic_op("eor %w0, %w1, %w4",
++ __futex_atomic_op("eor %w3, %w1, %w4",
+ ret, oldval, uaddr, tmp, oparg);
+ break;
+ default:
+diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
+index 4fc0e958770b..4cacc33d07ce 100644
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -145,10 +145,16 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
+ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
+ {
+ struct stackframe frame;
+- int skip;
++ int skip = 0;
+
+ pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
+
++ if (regs) {
++ if (user_mode(regs))
++ return;
++ skip = 1;
++ }
++
+ if (!tsk)
+ tsk = current;
+
+@@ -169,7 +175,6 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
+ frame.graph = tsk->curr_ret_stack;
+ #endif
+
+- skip = !!regs;
+ printk("Call trace:\n");
+ while (1) {
+ unsigned long stack;
+@@ -232,15 +237,13 @@ static int __die(const char *str, int err, struct pt_regs *regs)
+ return ret;
+
+ print_modules();
+- __show_regs(regs);
+ pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
+ TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk),
+ end_of_stack(tsk));
++ show_regs(regs);
+
+- if (!user_mode(regs)) {
+- dump_backtrace(regs, tsk);
++ if (!user_mode(regs))
+ dump_instr(KERN_EMERG, regs);
+- }
+
+ return ret;
+ }
+diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
+index caa295cd5d09..9e6c822d458d 100644
+--- a/arch/arm64/mm/init.c
++++ b/arch/arm64/mm/init.c
+@@ -447,7 +447,7 @@ void __init arm64_memblock_init(void)
+ * memory spans, randomize the linear region as well.
+ */
+ if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) {
+- range = range / ARM64_MEMSTART_ALIGN + 1;
++ range /= ARM64_MEMSTART_ALIGN;
+ memstart_addr -= ARM64_MEMSTART_ALIGN *
+ ((range * memstart_offset_seed) >> 16);
+ }
+diff --git a/arch/parisc/include/asm/ptrace.h b/arch/parisc/include/asm/ptrace.h
+index 46da07670c2b..c8f70f965e8e 100644
+--- a/arch/parisc/include/asm/ptrace.h
++++ b/arch/parisc/include/asm/ptrace.h
+@@ -22,7 +22,7 @@ unsigned long profile_pc(struct pt_regs *);
+
+ static inline unsigned long regs_return_value(struct pt_regs *regs)
+ {
+- return regs->gr[20];
++ return regs->gr[28];
+ }
+
+ #endif
+diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
+index cad3e8661cd6..4d712c1d64b8 100644
+--- a/arch/parisc/kernel/process.c
++++ b/arch/parisc/kernel/process.c
+@@ -209,12 +209,6 @@ void __cpuidle arch_cpu_idle(void)
+
+ static int __init parisc_idle_init(void)
+ {
+- const char *marker;
+-
+- /* check QEMU/SeaBIOS marker in PAGE0 */
+- marker = (char *) &PAGE0->pad0;
+- running_on_qemu = (memcmp(marker, "SeaBIOS", 8) == 0);
+-
+ if (!running_on_qemu)
+ cpu_idle_poll_ctrl(1);
+
+diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
+index f7d0c3b33d70..550f80ae9c8f 100644
+--- a/arch/parisc/kernel/setup.c
++++ b/arch/parisc/kernel/setup.c
+@@ -406,6 +406,9 @@ void __init start_parisc(void)
+ int ret, cpunum;
+ struct pdc_coproc_cfg coproc_cfg;
+
++ /* check QEMU/SeaBIOS marker in PAGE0 */
++ running_on_qemu = (memcmp(&PAGE0->pad0, "SeaBIOS", 8) == 0);
++
+ cpunum = smp_processor_id();
+
+ set_firmware_width_unlocked();
+diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
+index 979b9463e17b..927384d85faf 100644
+--- a/arch/powerpc/kernel/signal_64.c
++++ b/arch/powerpc/kernel/signal_64.c
+@@ -746,12 +746,25 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
+ if (restore_tm_sigcontexts(current, &uc->uc_mcontext,
+ &uc_transact->uc_mcontext))
+ goto badframe;
+- }
+- else
+- /* Fall through, for non-TM restore */
++ } else
+ #endif
+- if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
+- goto badframe;
++ {
++ /*
++ * Fall through, for non-TM restore
++ *
++ * Unset MSR[TS] on the thread regs since MSR from user
++ * context does not have MSR active, and recheckpoint was
++ * not called since restore_tm_sigcontexts() was not called
++ * also.
++ *
++ * If not unsetting it, the code can RFID to userspace with
++ * MSR[TS] set, but without CPU in the proper state,
++ * causing a TM bad thing.
++ */
++ current->thread.regs->msr &= ~MSR_TS_MASK;
++ if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
++ goto badframe;
++ }
+
+ if (restore_altstack(&uc->uc_stack))
+ goto badframe;
+diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
+index 0a550dc5c525..839015f1b0de 100644
+--- a/arch/x86/entry/vdso/Makefile
++++ b/arch/x86/entry/vdso/Makefile
+@@ -48,10 +48,8 @@ targets += $(vdso_img_sodbg)
+
+ export CPPFLAGS_vdso.lds += -P -C
+
+-VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
+- -Wl,--no-undefined \
+- -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096 \
+- $(DISABLE_LTO)
++VDSO_LDFLAGS_vdso.lds = -m elf_x86_64 -soname linux-vdso.so.1 --no-undefined \
++ -z max-page-size=4096
+
+ $(obj)/vdso64.so.dbg: $(src)/vdso.lds $(vobjs) FORCE
+ $(call if_changed,vdso)
+@@ -103,10 +101,8 @@ CFLAGS_REMOVE_vvar.o = -pg
+ #
+
+ CPPFLAGS_vdsox32.lds = $(CPPFLAGS_vdso.lds)
+-VDSO_LDFLAGS_vdsox32.lds = -Wl,-m,elf32_x86_64 \
+- -Wl,-soname=linux-vdso.so.1 \
+- -Wl,-z,max-page-size=4096 \
+- -Wl,-z,common-page-size=4096
++VDSO_LDFLAGS_vdsox32.lds = -m elf32_x86_64 -soname linux-vdso.so.1 \
++ -z max-page-size=4096
+
+ # 64-bit objects to re-brand as x32
+ vobjs64-for-x32 := $(filter-out $(vobjs-nox32),$(vobjs-y))
+@@ -134,7 +130,7 @@ $(obj)/vdsox32.so.dbg: $(src)/vdsox32.lds $(vobjx32s) FORCE
+ $(call if_changed,vdso)
+
+ CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds)
+-VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-m,elf_i386 -Wl,-soname=linux-gate.so.1
++VDSO_LDFLAGS_vdso32.lds = -m elf_i386 -soname linux-gate.so.1
+
+ # This makes sure the $(obj) subdirectory exists even though vdso32/
+ # is not a kbuild sub-make subdirectory.
+@@ -180,13 +176,13 @@ $(obj)/vdso32.so.dbg: FORCE \
+ # The DSO images are built using a special linker script.
+ #
+ quiet_cmd_vdso = VDSO $@
+- cmd_vdso = $(CC) -nostdlib -o $@ \
++ cmd_vdso = $(LD) -nostdlib -o $@ \
+ $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
+- -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
++ -T $(filter %.lds,$^) $(filter %.o,$^) && \
+ sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
+
+-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=both) \
+- $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
++VDSO_LDFLAGS = -shared $(call ld-option, --hash-style=both) \
++ $(call ld-option, --build-id) -Bsymbolic
+ GCOV_PROFILE := n
+
+ #
+diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
+index c84584bb9402..3e5dd85b019a 100644
+--- a/arch/x86/events/amd/core.c
++++ b/arch/x86/events/amd/core.c
+@@ -3,10 +3,14 @@
+ #include <linux/types.h>
+ #include <linux/init.h>
+ #include <linux/slab.h>
++#include <linux/delay.h>
+ #include <asm/apicdef.h>
++#include <asm/nmi.h>
+
+ #include "../perf_event.h"
+
++static DEFINE_PER_CPU(unsigned int, perf_nmi_counter);
++
+ static __initconst const u64 amd_hw_cache_event_ids
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+@@ -429,6 +433,132 @@ static void amd_pmu_cpu_dead(int cpu)
+ }
+ }
+
++/*
++ * When a PMC counter overflows, an NMI is used to process the event and
++ * reset the counter. NMI latency can result in the counter being updated
++ * before the NMI can run, which can result in what appear to be spurious
++ * NMIs. This function is intended to wait for the NMI to run and reset
++ * the counter to avoid possible unhandled NMI messages.
++ */
++#define OVERFLOW_WAIT_COUNT 50
++
++static void amd_pmu_wait_on_overflow(int idx)
++{
++ unsigned int i;
++ u64 counter;
++
++ /*
++ * Wait for the counter to be reset if it has overflowed. This loop
++ * should exit very, very quickly, but just in case, don't wait
++ * forever...
++ */
++ for (i = 0; i < OVERFLOW_WAIT_COUNT; i++) {
++ rdmsrl(x86_pmu_event_addr(idx), counter);
++ if (counter & (1ULL << (x86_pmu.cntval_bits - 1)))
++ break;
++
++ /* Might be in IRQ context, so can't sleep */
++ udelay(1);
++ }
++}
++
++static void amd_pmu_disable_all(void)
++{
++ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
++ int idx;
++
++ x86_pmu_disable_all();
++
++ /*
++ * This shouldn't be called from NMI context, but add a safeguard here
++ * to return, since if we're in NMI context we can't wait for an NMI
++ * to reset an overflowed counter value.
++ */
++ if (in_nmi())
++ return;
++
++ /*
++ * Check each counter for overflow and wait for it to be reset by the
++ * NMI if it has overflowed. This relies on the fact that all active
++ * counters are always enabled when this function is caled and
++ * ARCH_PERFMON_EVENTSEL_INT is always set.
++ */
++ for (idx = 0; idx < x86_pmu.num_counters; idx++) {
++ if (!test_bit(idx, cpuc->active_mask))
++ continue;
++
++ amd_pmu_wait_on_overflow(idx);
++ }
++}
++
++static void amd_pmu_disable_event(struct perf_event *event)
++{
++ x86_pmu_disable_event(event);
++
++ /*
++ * This can be called from NMI context (via x86_pmu_stop). The counter
++ * may have overflowed, but either way, we'll never see it get reset
++ * by the NMI if we're already in the NMI. And the NMI latency support
++ * below will take care of any pending NMI that might have been
++ * generated by the overflow.
++ */
++ if (in_nmi())
++ return;
++
++ amd_pmu_wait_on_overflow(event->hw.idx);
++}
++
++/*
++ * Because of NMI latency, if multiple PMC counters are active or other sources
++ * of NMIs are received, the perf NMI handler can handle one or more overflowed
++ * PMC counters outside of the NMI associated with the PMC overflow. If the NMI
++ * doesn't arrive at the LAPIC in time to become a pending NMI, then the kernel
++ * back-to-back NMI support won't be active. This PMC handler needs to take into
++ * account that this can occur, otherwise this could result in unknown NMI
++ * messages being issued. Examples of this is PMC overflow while in the NMI
++ * handler when multiple PMCs are active or PMC overflow while handling some
++ * other source of an NMI.
++ *
++ * Attempt to mitigate this by using the number of active PMCs to determine
++ * whether to return NMI_HANDLED if the perf NMI handler did not handle/reset
++ * any PMCs. The per-CPU perf_nmi_counter variable is set to a minimum of the
++ * number of active PMCs or 2. The value of 2 is used in case an NMI does not
++ * arrive at the LAPIC in time to be collapsed into an already pending NMI.
++ */
++static int amd_pmu_handle_irq(struct pt_regs *regs)
++{
++ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
++ int active, handled;
++
++ /*
++ * Obtain the active count before calling x86_pmu_handle_irq() since
++ * it is possible that x86_pmu_handle_irq() may make a counter
++ * inactive (through x86_pmu_stop).
++ */
++ active = __bitmap_weight(cpuc->active_mask, X86_PMC_IDX_MAX);
++
++ /* Process any counter overflows */
++ handled = x86_pmu_handle_irq(regs);
++
++ /*
++ * If a counter was handled, record the number of possible remaining
++ * NMIs that can occur.
++ */
++ if (handled) {
++ this_cpu_write(perf_nmi_counter,
++ min_t(unsigned int, 2, active));
++
++ return handled;
++ }
++
++ if (!this_cpu_read(perf_nmi_counter))
++ return NMI_DONE;
++
++ this_cpu_dec(perf_nmi_counter);
++
++ return NMI_HANDLED;
++}
++
+ static struct event_constraint *
+ amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+ struct perf_event *event)
+@@ -621,11 +751,11 @@ static ssize_t amd_event_sysfs_show(char *page, u64 config)
+
+ static __initconst const struct x86_pmu amd_pmu = {
+ .name = "AMD",
+- .handle_irq = x86_pmu_handle_irq,
+- .disable_all = x86_pmu_disable_all,
++ .handle_irq = amd_pmu_handle_irq,
++ .disable_all = amd_pmu_disable_all,
+ .enable_all = x86_pmu_enable_all,
+ .enable = x86_pmu_enable_event,
+- .disable = x86_pmu_disable_event,
++ .disable = amd_pmu_disable_event,
+ .hw_config = amd_pmu_hw_config,
+ .schedule_events = x86_schedule_events,
+ .eventsel = MSR_K7_EVNTSEL0,
+@@ -728,7 +858,7 @@ void amd_pmu_enable_virt(void)
+ cpuc->perf_ctr_virt_mask = 0;
+
+ /* Reload all events */
+- x86_pmu_disable_all();
++ amd_pmu_disable_all();
+ x86_pmu_enable_all(0);
+ }
+ EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);
+@@ -746,7 +876,7 @@ void amd_pmu_disable_virt(void)
+ cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
+
+ /* Reload all events */
+- x86_pmu_disable_all();
++ amd_pmu_disable_all();
+ x86_pmu_enable_all(0);
+ }
+ EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);
+diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
+index 65e44f0588e2..6ed99de2ddf5 100644
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -1328,8 +1328,9 @@ void x86_pmu_stop(struct perf_event *event, int flags)
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+
+- if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
++ if (test_bit(hwc->idx, cpuc->active_mask)) {
+ x86_pmu.disable(event);
++ __clear_bit(hwc->idx, cpuc->active_mask);
+ cpuc->events[hwc->idx] = NULL;
+ WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
+ hwc->state |= PERF_HES_STOPPED;
+@@ -1426,16 +1427,8 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
+ apic_write(APIC_LVTPC, APIC_DM_NMI);
+
+ for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+- if (!test_bit(idx, cpuc->active_mask)) {
+- /*
+- * Though we deactivated the counter some cpus
+- * might still deliver spurious interrupts still
+- * in flight. Catch them:
+- */
+- if (__test_and_clear_bit(idx, cpuc->running))
+- handled++;
++ if (!test_bit(idx, cpuc->active_mask))
+ continue;
+- }
+
+ event = cpuc->events[idx];
+
+diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h
+index 982c325dad33..8be6afb58471 100644
+--- a/arch/x86/include/asm/suspend_32.h
++++ b/arch/x86/include/asm/suspend_32.h
+@@ -12,7 +12,13 @@
+
+ /* image of the saved processor state */
+ struct saved_context {
+- u16 es, fs, gs, ss;
++ /*
++ * On x86_32, all segment registers, with the possible exception of
++ * gs, are saved at kernel entry in pt_regs.
++ */
++#ifdef CONFIG_X86_32_LAZY_GS
++ u16 gs;
++#endif
+ unsigned long cr0, cr2, cr3, cr4;
+ u64 misc_enable;
+ bool misc_enable_saved;
+diff --git a/arch/x86/include/asm/suspend_64.h b/arch/x86/include/asm/suspend_64.h
+index 7306e911faee..a7af9f53c0cb 100644
+--- a/arch/x86/include/asm/suspend_64.h
++++ b/arch/x86/include/asm/suspend_64.h
+@@ -20,8 +20,20 @@
+ */
+ struct saved_context {
+ struct pt_regs regs;
+- u16 ds, es, fs, gs, ss;
+- unsigned long gs_base, gs_kernel_base, fs_base;
++
++ /*
++ * User CS and SS are saved in current_pt_regs(). The rest of the
++ * segment selectors need to be saved and restored here.
++ */
++ u16 ds, es, fs, gs;
++
++ /*
++ * Usermode FSBASE and GSBASE may not match the fs and gs selectors,
++ * so we save them separately. We save the kernelmode GSBASE to
++ * restore percpu access after resume.
++ */
++ unsigned long kernelmode_gs_base, usermode_gs_base, fs_base;
++
+ unsigned long cr0, cr2, cr3, cr4, cr8;
+ u64 misc_enable;
+ bool misc_enable_saved;
+@@ -30,8 +42,7 @@ struct saved_context {
+ u16 gdt_pad; /* Unused */
+ struct desc_ptr gdt_desc;
+ u16 idt_pad;
+- u16 idt_limit;
+- unsigned long idt_base;
++ struct desc_ptr idt;
+ u16 ldt;
+ u16 tss;
+ unsigned long tr;
+diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
+index bfd882617613..e7e625448008 100644
+--- a/arch/x86/include/asm/xen/hypercall.h
++++ b/arch/x86/include/asm/xen/hypercall.h
+@@ -217,6 +217,9 @@ privcmd_call(unsigned call,
+ __HYPERCALL_DECLS;
+ __HYPERCALL_5ARG(a1, a2, a3, a4, a5);
+
++ if (call >= PAGE_SIZE / sizeof(hypercall_page[0]))
++ return -EINVAL;
++
+ stac();
+ asm volatile(CALL_NOSPEC
+ : __HYPERCALL_5PARAM
+diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
+index 04d5157fe7f8..a7d966964c6f 100644
+--- a/arch/x86/power/cpu.c
++++ b/arch/x86/power/cpu.c
+@@ -82,12 +82,8 @@ static void __save_processor_state(struct saved_context *ctxt)
+ /*
+ * descriptor tables
+ */
+-#ifdef CONFIG_X86_32
+ store_idt(&ctxt->idt);
+-#else
+-/* CONFIG_X86_64 */
+- store_idt((struct desc_ptr *)&ctxt->idt_limit);
+-#endif
++
+ /*
+ * We save it here, but restore it only in the hibernate case.
+ * For ACPI S3 resume, this is loaded via 'early_gdt_desc' in 64-bit
+@@ -103,22 +99,18 @@ static void __save_processor_state(struct saved_context *ctxt)
+ /*
+ * segment registers
+ */
+-#ifdef CONFIG_X86_32
+- savesegment(es, ctxt->es);
+- savesegment(fs, ctxt->fs);
++#ifdef CONFIG_X86_32_LAZY_GS
+ savesegment(gs, ctxt->gs);
+- savesegment(ss, ctxt->ss);
+-#else
+-/* CONFIG_X86_64 */
+- asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
+- asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
+- asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
+- asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
+- asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));
++#endif
++#ifdef CONFIG_X86_64
++ savesegment(gs, ctxt->gs);
++ savesegment(fs, ctxt->fs);
++ savesegment(ds, ctxt->ds);
++ savesegment(es, ctxt->es);
+
+ rdmsrl(MSR_FS_BASE, ctxt->fs_base);
+- rdmsrl(MSR_GS_BASE, ctxt->gs_base);
+- rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
++ rdmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
++ rdmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
+ mtrr_save_fixed_ranges(NULL);
+
+ rdmsrl(MSR_EFER, ctxt->efer);
+@@ -180,6 +172,9 @@ static void fix_processor_context(void)
+ write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
+
+ syscall_init(); /* This sets MSR_*STAR and related */
++#else
++ if (boot_cpu_has(X86_FEATURE_SEP))
++ enable_sep_cpu();
+ #endif
+ load_TR_desc(); /* This does ltr */
+ load_mm_ldt(current->active_mm); /* This does lldt */
+@@ -192,9 +187,12 @@ static void fix_processor_context(void)
+ }
+
+ /**
+- * __restore_processor_state - restore the contents of CPU registers saved
+- * by __save_processor_state()
+- * @ctxt - structure to load the registers contents from
++ * __restore_processor_state - restore the contents of CPU registers saved
++ * by __save_processor_state()
++ * @ctxt - structure to load the registers contents from
++ *
++ * The asm code that gets us here will have restored a usable GDT, although
++ * it will be pointing to the wrong alias.
+ */
+ static void notrace __restore_processor_state(struct saved_context *ctxt)
+ {
+@@ -217,46 +215,52 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
+ write_cr2(ctxt->cr2);
+ write_cr0(ctxt->cr0);
+
++ /* Restore the IDT. */
++ load_idt(&ctxt->idt);
++
+ /*
+- * now restore the descriptor tables to their proper values
+- * ltr is done i fix_processor_context().
++ * Just in case the asm code got us here with the SS, DS, or ES
++ * out of sync with the GDT, update them.
+ */
+-#ifdef CONFIG_X86_32
+- load_idt(&ctxt->idt);
++ loadsegment(ss, __KERNEL_DS);
++ loadsegment(ds, __USER_DS);
++ loadsegment(es, __USER_DS);
++
++ /*
++ * Restore percpu access. Percpu access can happen in exception
++ * handlers or in complicated helpers like load_gs_index().
++ */
++#ifdef CONFIG_X86_64
++ wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
+ #else
+-/* CONFIG_X86_64 */
+- load_idt((const struct desc_ptr *)&ctxt->idt_limit);
++ loadsegment(fs, __KERNEL_PERCPU);
++ loadsegment(gs, __KERNEL_STACK_CANARY);
+ #endif
+
++ /* Restore the TSS, RO GDT, LDT, and usermode-relevant MSRs. */
++ fix_processor_context();
++
+ /*
+- * segment registers
++ * Now that we have descriptor tables fully restored and working
++ * exception handling, restore the usermode segments.
+ */
+-#ifdef CONFIG_X86_32
++#ifdef CONFIG_X86_64
++ loadsegment(ds, ctxt->es);
+ loadsegment(es, ctxt->es);
+ loadsegment(fs, ctxt->fs);
+- loadsegment(gs, ctxt->gs);
+- loadsegment(ss, ctxt->ss);
++ load_gs_index(ctxt->gs);
+
+ /*
+- * sysenter MSRs
++ * Restore FSBASE and GSBASE after restoring the selectors, since
++ * restoring the selectors clobbers the bases. Keep in mind
++ * that MSR_KERNEL_GS_BASE is horribly misnamed.
+ */
+- if (boot_cpu_has(X86_FEATURE_SEP))
+- enable_sep_cpu();
+-#else
+-/* CONFIG_X86_64 */
+- asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
+- asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
+- asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
+- load_gs_index(ctxt->gs);
+- asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));
+-
+ wrmsrl(MSR_FS_BASE, ctxt->fs_base);
+- wrmsrl(MSR_GS_BASE, ctxt->gs_base);
+- wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
++ wrmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
++#elif defined(CONFIG_X86_32_LAZY_GS)
++ loadsegment(gs, ctxt->gs);
+ #endif
+
+- fix_processor_context();
+-
+ do_fpu_end();
+ tsc_verify_tsc_adjust(true);
+ x86_platform.restore_sched_clock_state();
+diff --git a/arch/xtensa/kernel/stacktrace.c b/arch/xtensa/kernel/stacktrace.c
+index 0df4080fa20f..a94da7dd3eae 100644
+--- a/arch/xtensa/kernel/stacktrace.c
++++ b/arch/xtensa/kernel/stacktrace.c
+@@ -253,10 +253,14 @@ static int return_address_cb(struct stackframe *frame, void *data)
+ return 1;
+ }
+
++/*
++ * level == 0 is for the return address from the caller of this function,
++ * not from this function itself.
++ */
+ unsigned long return_address(unsigned level)
+ {
+ struct return_addr_data r = {
+- .skip = level + 1,
++ .skip = level,
+ };
+ walk_stackframe(stack_pointer(NULL), return_address_cb, &r);
+ return r.addr;
+diff --git a/block/bio.c b/block/bio.c
+index 2e5d881423b8..d01ab919b313 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -1280,8 +1280,11 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
+ }
+ }
+
+- if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
++ if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) {
++ if (!map_data)
++ __free_page(page);
+ break;
++ }
+
+ len -= bytes;
+ offset = 0;
+diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
+index c28dca0c613d..88316f86cc95 100644
+--- a/drivers/char/Kconfig
++++ b/drivers/char/Kconfig
+@@ -380,7 +380,7 @@ config XILINX_HWICAP
+
+ config R3964
+ tristate "Siemens R3964 line discipline"
+- depends on TTY
++ depends on TTY && BROKEN
+ ---help---
+ This driver allows synchronous communication with devices using the
+ Siemens R3964 packet protocol. Unless you are dealing with special
+diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
+index dadacbe558ab..1a1f7eb46d1e 100644
+--- a/drivers/gpu/drm/i915/gvt/gtt.c
++++ b/drivers/gpu/drm/i915/gvt/gtt.c
+@@ -1629,7 +1629,7 @@ void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
+ if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
+ return;
+
+- atomic_dec(&mm->pincount);
++ atomic_dec_if_positive(&mm->pincount);
+ }
+
+ /**
+diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
+index 31421b6b586e..b45ac6bc8add 100644
+--- a/drivers/gpu/drm/udl/udl_drv.c
++++ b/drivers/gpu/drm/udl/udl_drv.c
+@@ -47,6 +47,7 @@ static struct drm_driver driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
+ .load = udl_driver_load,
+ .unload = udl_driver_unload,
++ .release = udl_driver_release,
+
+ /* gem hooks */
+ .gem_free_object = udl_gem_free_object,
+diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
+index 2c149b841cf1..307455dd6526 100644
+--- a/drivers/gpu/drm/udl/udl_drv.h
++++ b/drivers/gpu/drm/udl/udl_drv.h
+@@ -101,6 +101,7 @@ void udl_urb_completion(struct urb *urb);
+
+ int udl_driver_load(struct drm_device *dev, unsigned long flags);
+ void udl_driver_unload(struct drm_device *dev);
++void udl_driver_release(struct drm_device *dev);
+
+ int udl_fbdev_init(struct drm_device *dev);
+ void udl_fbdev_cleanup(struct drm_device *dev);
+diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
+index f8ea3c99b523..60866b422f81 100644
+--- a/drivers/gpu/drm/udl/udl_main.c
++++ b/drivers/gpu/drm/udl/udl_main.c
+@@ -378,6 +378,12 @@ void udl_driver_unload(struct drm_device *dev)
+ udl_free_urb_list(dev);
+
+ udl_fbdev_cleanup(dev);
+- udl_modeset_cleanup(dev);
+ kfree(udl);
+ }
++
++void udl_driver_release(struct drm_device *dev)
++{
++ udl_modeset_cleanup(dev);
++ drm_dev_fini(dev);
++ kfree(dev);
++}
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index f9cd81375f28..d76e685206b3 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -1789,6 +1789,36 @@ static bool dm_table_supports_discards(struct dm_table *t)
+ return true;
+ }
+
++static int device_requires_stable_pages(struct dm_target *ti,
++ struct dm_dev *dev, sector_t start,
++ sector_t len, void *data)
++{
++ struct request_queue *q = bdev_get_queue(dev->bdev);
++
++ return q && bdi_cap_stable_pages_required(q->backing_dev_info);
++}
++
++/*
++ * If any underlying device requires stable pages, a table must require
++ * them as well. Only targets that support iterate_devices are considered:
++ * don't want error, zero, etc to require stable pages.
++ */
++static bool dm_table_requires_stable_pages(struct dm_table *t)
++{
++ struct dm_target *ti;
++ unsigned i;
++
++ for (i = 0; i < dm_table_get_num_targets(t); i++) {
++ ti = dm_table_get_target(t, i);
++
++ if (ti->type->iterate_devices &&
++ ti->type->iterate_devices(ti, device_requires_stable_pages, NULL))
++ return true;
++ }
++
++ return false;
++}
++
+ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
+ struct queue_limits *limits)
+ {
+@@ -1837,6 +1867,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
+
+ dm_table_verify_integrity(t);
+
++ /*
++ * Some devices don't use blk_integrity but still want stable pages
++ * because they do their own checksumming.
++ */
++ if (dm_table_requires_stable_pages(t))
++ q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
++ else
++ q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
++
+ /*
+ * Determine whether or not this queue's I/O timings contribute
+ * to the entropy pool, Only request-based targets use this.
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 15ad247955f7..446577a1a6a5 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -1076,6 +1076,8 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+ tpa_info = &rxr->rx_tpa[agg_id];
+
+ if (unlikely(cons != rxr->rx_next_cons)) {
++ netdev_warn(bp->dev, "TPA cons %x != expected cons %x\n",
++ cons, rxr->rx_next_cons);
+ bnxt_sched_reset(bp, rxr);
+ return;
+ }
+@@ -1528,15 +1530,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
+ }
+
+ cons = rxcmp->rx_cmp_opaque;
+- rx_buf = &rxr->rx_buf_ring[cons];
+- data = rx_buf->data;
+- data_ptr = rx_buf->data_ptr;
+ if (unlikely(cons != rxr->rx_next_cons)) {
+ int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);
+
++ netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
++ cons, rxr->rx_next_cons);
+ bnxt_sched_reset(bp, rxr);
+ return rc1;
+ }
++ rx_buf = &rxr->rx_buf_ring[cons];
++ data = rx_buf->data;
++ data_ptr = rx_buf->data_ptr;
+ prefetch(data_ptr);
+
+ misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
+@@ -1553,11 +1557,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
+
+ rx_buf->data = NULL;
+ if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
++ u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
++
+ bnxt_reuse_rx_data(rxr, cons, data);
+ if (agg_bufs)
+ bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
+
+ rc = -EIO;
++ if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
++ netdev_warn(bp->dev, "RX buffer error %x\n", rx_err);
++ bnxt_sched_reset(bp, rxr);
++ }
+ goto next_rx;
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+index ece3fb147e3e..36ae0b2519d2 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+@@ -45,7 +45,9 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
+ if (err)
+ return err;
+
++ mutex_lock(&mdev->mlx5e_res.td.list_lock);
+ list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list);
++ mutex_unlock(&mdev->mlx5e_res.td.list_lock);
+
+ return 0;
+ }
+@@ -53,8 +55,10 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
+ void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
+ struct mlx5e_tir *tir)
+ {
++ mutex_lock(&mdev->mlx5e_res.td.list_lock);
+ mlx5_core_destroy_tir(mdev, tir->tirn);
+ list_del(&tir->list);
++ mutex_unlock(&mdev->mlx5e_res.td.list_lock);
+ }
+
+ static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
+@@ -114,6 +118,7 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
+ }
+
+ INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list);
++ mutex_init(&mdev->mlx5e_res.td.list_lock);
+
+ return 0;
+
+@@ -140,15 +145,17 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
+ {
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_tir *tir;
+- int err = -ENOMEM;
++ int err = 0;
+ u32 tirn = 0;
+ int inlen;
+ void *in;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
+ in = kvzalloc(inlen, GFP_KERNEL);
+- if (!in)
++ if (!in) {
++ err = -ENOMEM;
+ goto out;
++ }
+
+ if (enable_uc_lb)
+ MLX5_SET(modify_tir_in, in, ctx.self_lb_block,
+@@ -156,6 +163,7 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
+
+ MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
+
++ mutex_lock(&mdev->mlx5e_res.td.list_lock);
+ list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
+ tirn = tir->tirn;
+ err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
+@@ -167,6 +175,7 @@ out:
+ kvfree(in);
+ if (err)
+ netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err);
++ mutex_unlock(&mdev->mlx5e_res.td.list_lock);
+
+ return err;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index 558fc6a05e2a..826d1a4600f3 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -155,26 +155,6 @@ static struct mlx5_profile profile[] = {
+ .size = 8,
+ .limit = 4
+ },
+- .mr_cache[16] = {
+- .size = 8,
+- .limit = 4
+- },
+- .mr_cache[17] = {
+- .size = 8,
+- .limit = 4
+- },
+- .mr_cache[18] = {
+- .size = 8,
+- .limit = 4
+- },
+- .mr_cache[19] = {
+- .size = 4,
+- .limit = 2
+- },
+- .mr_cache[20] = {
+- .size = 4,
+- .limit = 2
+- },
+ },
+ };
+
+diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+index 9a7655560629..1910ca21a1bc 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
++++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+@@ -200,7 +200,7 @@ static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev)
+ ret = dev_queue_xmit(skb);
+ nfp_repr_inc_tx_stats(netdev, len, ret);
+
+- return ret;
++ return NETDEV_TX_OK;
+ }
+
+ static int nfp_repr_stop(struct net_device *netdev)
+diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
+index e33a6c672a0a..0f07b5978fa1 100644
+--- a/drivers/net/hyperv/hyperv_net.h
++++ b/drivers/net/hyperv/hyperv_net.h
+@@ -779,6 +779,7 @@ struct netvsc_device {
+
+ wait_queue_head_t wait_drain;
+ bool destroy;
++ bool tx_disable; /* if true, do not wake up queue again */
+
+ /* Receive buffer allocated by us but manages by NetVSP */
+ void *recv_buf;
+diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
+index 806239b89990..a3bb4d5c64f5 100644
+--- a/drivers/net/hyperv/netvsc.c
++++ b/drivers/net/hyperv/netvsc.c
+@@ -107,6 +107,7 @@ static struct netvsc_device *alloc_net_device(void)
+
+ init_waitqueue_head(&net_device->wait_drain);
+ net_device->destroy = false;
++ net_device->tx_disable = false;
+ atomic_set(&net_device->open_cnt, 0);
+ net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
+ net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
+@@ -712,7 +713,7 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device,
+ } else {
+ struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
+
+- if (netif_tx_queue_stopped(txq) &&
++ if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
+ (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER ||
+ queue_sends < 1)) {
+ netif_tx_wake_queue(txq);
+@@ -865,7 +866,8 @@ static inline int netvsc_send_pkt(
+ netif_tx_stop_queue(txq);
+ } else if (ret == -EAGAIN) {
+ netif_tx_stop_queue(txq);
+- if (atomic_read(&nvchan->queue_sends) < 1) {
++ if (atomic_read(&nvchan->queue_sends) < 1 &&
++ !net_device->tx_disable) {
+ netif_tx_wake_queue(txq);
+ ret = -ENOSPC;
+ }
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index 74b9e51b2b47..eb92720dd1c4 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -108,6 +108,15 @@ static void netvsc_set_rx_mode(struct net_device *net)
+ rcu_read_unlock();
+ }
+
++static void netvsc_tx_enable(struct netvsc_device *nvscdev,
++ struct net_device *ndev)
++{
++ nvscdev->tx_disable = false;
++ virt_wmb(); /* ensure queue wake up mechanism is on */
++
++ netif_tx_wake_all_queues(ndev);
++}
++
+ static int netvsc_open(struct net_device *net)
+ {
+ struct net_device_context *ndev_ctx = netdev_priv(net);
+@@ -128,7 +137,7 @@ static int netvsc_open(struct net_device *net)
+ rdev = nvdev->extension;
+ if (!rdev->link_state) {
+ netif_carrier_on(net);
+- netif_tx_wake_all_queues(net);
++ netvsc_tx_enable(nvdev, net);
+ }
+
+ if (vf_netdev) {
+@@ -183,6 +192,17 @@ static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
+ }
+ }
+
++static void netvsc_tx_disable(struct netvsc_device *nvscdev,
++ struct net_device *ndev)
++{
++ if (nvscdev) {
++ nvscdev->tx_disable = true;
++ virt_wmb(); /* ensure txq will not wake up after stop */
++ }
++
++ netif_tx_disable(ndev);
++}
++
+ static int netvsc_close(struct net_device *net)
+ {
+ struct net_device_context *net_device_ctx = netdev_priv(net);
+@@ -191,7 +211,7 @@ static int netvsc_close(struct net_device *net)
+ struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
+ int ret;
+
+- netif_tx_disable(net);
++ netvsc_tx_disable(nvdev, net);
+
+ /* No need to close rndis filter if it is removed already */
+ if (!nvdev)
+@@ -893,7 +913,7 @@ static int netvsc_detach(struct net_device *ndev,
+
+ /* If device was up (receiving) then shutdown */
+ if (netif_running(ndev)) {
+- netif_tx_disable(ndev);
++ netvsc_tx_disable(nvdev, ndev);
+
+ ret = rndis_filter_close(nvdev);
+ if (ret) {
+@@ -1720,7 +1740,7 @@ static void netvsc_link_change(struct work_struct *w)
+ if (rdev->link_state) {
+ rdev->link_state = false;
+ netif_carrier_on(net);
+- netif_tx_wake_all_queues(net);
++ netvsc_tx_enable(net_device, net);
+ } else {
+ notify = true;
+ }
+@@ -1730,7 +1750,7 @@ static void netvsc_link_change(struct work_struct *w)
+ if (!rdev->link_state) {
+ rdev->link_state = true;
+ netif_carrier_off(net);
+- netif_tx_stop_all_queues(net);
++ netvsc_tx_disable(net_device, net);
+ }
+ kfree(event);
+ break;
+@@ -1739,7 +1759,7 @@ static void netvsc_link_change(struct work_struct *w)
+ if (!rdev->link_state) {
+ rdev->link_state = true;
+ netif_carrier_off(net);
+- netif_tx_stop_all_queues(net);
++ netvsc_tx_disable(net_device, net);
+ event->event = RNDIS_STATUS_MEDIA_CONNECT;
+ spin_lock_irqsave(&ndev_ctx->lock, flags);
+ list_add(&event->list, &ndev_ctx->reconfig_events);
+diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
+index a1b68b19d912..5ab725a571a8 100644
+--- a/drivers/net/phy/sfp.c
++++ b/drivers/net/phy/sfp.c
+@@ -878,6 +878,10 @@ static int sfp_probe(struct platform_device *pdev)
+ if (poll)
+ mod_delayed_work(system_wq, &sfp->poll, poll_jiffies);
+
++ sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops);
++ if (!sfp->sfp_bus)
++ return -ENOMEM;
++
+ return 0;
+ }
+
+@@ -887,10 +891,6 @@ static int sfp_remove(struct platform_device *pdev)
+
+ sfp_unregister_socket(sfp->sfp_bus);
+
+- sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops);
+- if (!sfp->sfp_bus)
+- return -ENOMEM;
+-
+ return 0;
+ }
+
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 65e47cc52d14..01abe8eea753 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1188,6 +1188,7 @@ static const struct usb_device_id products[] = {
+ {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
+ {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
+ {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
++ {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */
+ {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
+ {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
+ {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index d442afa195ab..867056395d48 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -3888,6 +3888,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9128,
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130,
+ quirk_dma_func1_alias);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9170,
++ quirk_dma_func1_alias);
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c47 + c57 */
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9172,
+ quirk_dma_func1_alias);
+diff --git a/drivers/staging/ccree/ssi_hash.c b/drivers/staging/ccree/ssi_hash.c
+index e266a70a1b32..13291aeaf350 100644
+--- a/drivers/staging/ccree/ssi_hash.c
++++ b/drivers/staging/ccree/ssi_hash.c
+@@ -1781,7 +1781,7 @@ static int ssi_ahash_import(struct ahash_request *req, const void *in)
+ struct device *dev = &ctx->drvdata->plat_dev->dev;
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ u32 tmp;
+- int rc = 0;
++ int rc;
+
+ memcpy(&tmp, in, sizeof(u32));
+ if (tmp != CC_EXPORT_MAGIC) {
+@@ -1790,12 +1790,9 @@ static int ssi_ahash_import(struct ahash_request *req, const void *in)
+ }
+ in += sizeof(u32);
+
+- /* call init() to allocate bufs if the user hasn't */
+- if (!state->digest_buff) {
+- rc = ssi_hash_init(state, ctx);
+- if (rc)
+- goto out;
+- }
++ rc = ssi_hash_init(state, ctx);
++ if (rc)
++ goto out;
+
+ dma_sync_single_for_cpu(dev, state->digest_buff_dma_addr,
+ ctx->inter_digestsize, DMA_BIDIRECTIONAL);
1528 +diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
1529 +index b811442c5ce6..9788a25a34f4 100644
1530 +--- a/drivers/tty/Kconfig
1531 ++++ b/drivers/tty/Kconfig
1532 +@@ -467,4 +467,28 @@ config VCC
1533 + depends on SUN_LDOMS
1534 + help
1535 + Support for Sun logical domain consoles.
1536 ++
1537 ++config LDISC_AUTOLOAD
1538 ++ bool "Automatically load TTY Line Disciplines"
1539 ++ default y
1540 ++ help
1541 ++ Historically the kernel has always automatically loaded any
1542 ++ line discipline that is in a kernel module when a user asks
1543 ++ for it to be loaded with the TIOCSETD ioctl, or through other
1544 ++ means. This is not always the best thing to do on systems
1545 ++ where you know you will not be using some of the more
1546 ++ "ancient" line disciplines, so prevent the kernel from doing
1547 ++ this unless the request is coming from a process with the
1548 ++ CAP_SYS_MODULE permissions.
1549 ++
1550 ++ Say 'Y' here if you trust your userspace users to do the right
1551 ++ thing, or if you have only provided the line disciplines that
1552 ++ you know you will be using, or if you wish to continue to use
1553 ++ the traditional method of on-demand loading of these modules
1554 ++ by any user.
1555 ++
1556 ++ This functionality can be changed at runtime with the
1557 ++ dev.tty.ldisc_autoload sysctl, this configuration option will
1558 ++ only set the default value of this functionality.
1559 ++
1560 + endif # TTY
1561 +diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
1562 +index 7e351d205393..dba4f53a7fff 100644
1563 +--- a/drivers/tty/tty_io.c
1564 ++++ b/drivers/tty/tty_io.c
1565 +@@ -511,6 +511,8 @@ static const struct file_operations hung_up_tty_fops = {
1566 + static DEFINE_SPINLOCK(redirect_lock);
1567 + static struct file *redirect;
1568 +
1569 ++extern void tty_sysctl_init(void);
1570 ++
1571 + /**
1572 + * tty_wakeup - request more data
1573 + * @tty: terminal
1574 +@@ -3332,6 +3334,7 @@ void console_sysfs_notify(void)
1575 + */
1576 + int __init tty_init(void)
1577 + {
1578 ++ tty_sysctl_init();
1579 + cdev_init(&tty_cdev, &tty_fops);
1580 + if (cdev_add(&tty_cdev, MKDEV(TTYAUX_MAJOR, 0), 1) ||
1581 + register_chrdev_region(MKDEV(TTYAUX_MAJOR, 0), 1, "/dev/tty") < 0)
1582 +diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
1583 +index ca656ef8de64..01fcdc7ff077 100644
1584 +--- a/drivers/tty/tty_ldisc.c
1585 ++++ b/drivers/tty/tty_ldisc.c
1586 +@@ -155,6 +155,13 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
1587 + * takes tty_ldiscs_lock to guard against ldisc races
1588 + */
1589 +
1590 ++#if defined(CONFIG_LDISC_AUTOLOAD)
1591 ++ #define INITIAL_AUTOLOAD_STATE 1
1592 ++#else
1593 ++ #define INITIAL_AUTOLOAD_STATE 0
1594 ++#endif
1595 ++static int tty_ldisc_autoload = INITIAL_AUTOLOAD_STATE;
1596 ++
1597 + static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
1598 + {
1599 + struct tty_ldisc *ld;
1600 +@@ -169,6 +176,8 @@ static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
1601 + */
1602 + ldops = get_ldops(disc);
1603 + if (IS_ERR(ldops)) {
1604 ++ if (!capable(CAP_SYS_MODULE) && !tty_ldisc_autoload)
1605 ++ return ERR_PTR(-EPERM);
1606 + request_module("tty-ldisc-%d", disc);
1607 + ldops = get_ldops(disc);
1608 + if (IS_ERR(ldops))
1609 +@@ -841,3 +850,41 @@ void tty_ldisc_deinit(struct tty_struct *tty)
1610 + tty_ldisc_put(tty->ldisc);
1611 + tty->ldisc = NULL;
1612 + }
1613 ++
1614 ++static int zero;
1615 ++static int one = 1;
1616 ++static struct ctl_table tty_table[] = {
1617 ++ {
1618 ++ .procname = "ldisc_autoload",
1619 ++ .data = &tty_ldisc_autoload,
1620 ++ .maxlen = sizeof(tty_ldisc_autoload),
1621 ++ .mode = 0644,
1622 ++ .proc_handler = proc_dointvec,
1623 ++ .extra1 = &zero,
1624 ++ .extra2 = &one,
1625 ++ },
1626 ++ { }
1627 ++};
1628 ++
1629 ++static struct ctl_table tty_dir_table[] = {
1630 ++ {
1631 ++ .procname = "tty",
1632 ++ .mode = 0555,
1633 ++ .child = tty_table,
1634 ++ },
1635 ++ { }
1636 ++};
1637 ++
1638 ++static struct ctl_table tty_root_table[] = {
1639 ++ {
1640 ++ .procname = "dev",
1641 ++ .mode = 0555,
1642 ++ .child = tty_dir_table,
1643 ++ },
1644 ++ { }
1645 ++};
1646 ++
1647 ++void tty_sysctl_init(void)
1648 ++{
1649 ++ register_sysctl_table(tty_root_table);
1650 ++}
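
The table above surfaces the knob as /proc/sys/dev/tty/ldisc_autoload. One caveat worth noting: plain proc_dointvec does not enforce the extra1/extra2 bounds (only proc_dointvec_minmax does), so the zero/one pointers here document intent rather than clamp writes. A small sketch of driving the knob from userspace (root is required for the write; illustrative only):

    #include <stdio.h>

    #define KNOB "/proc/sys/dev/tty/ldisc_autoload"

    int main(void)
    {
            FILE *f = fopen(KNOB, "r+");
            int val;

            if (!f) {
                    perror(KNOB);
                    return 1;
            }
            if (fscanf(f, "%d", &val) == 1)
                    printf("ldisc_autoload = %d\n", val);
            rewind(f);
            fprintf(f, "0\n");    /* forbid autoload for non-CAP_SYS_MODULE */
            fclose(f);
            return 0;
    }
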
1651 +diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
1652 +index 71458f493cf8..cc9d421c0929 100644
1653 +--- a/drivers/virtio/virtio_ring.c
1654 ++++ b/drivers/virtio/virtio_ring.c
1655 +@@ -1087,6 +1087,8 @@ struct virtqueue *vring_create_virtqueue(
1656 + GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
1657 + if (queue)
1658 + break;
1659 ++ if (!may_reduce_num)
1660 ++ return NULL;
1661 + }
1662 +
1663 + if (!num)
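
The virtio fix above concerns the shrink-and-retry allocation loop: vring_create_virtqueue() halves the ring size until an allocation succeeds, which is only legal when the caller allowed shrinking. A standalone sketch of the pattern with the added early exit (all names and the fake 64-byte entry size are invented for the demo):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void *alloc_ring(unsigned int *num, bool may_reduce_num)
    {
            for (; *num; *num /= 2) {
                    void *queue = calloc(*num, 64);    /* fake entry size */

                    if (queue)
                            return queue;
                    if (!may_reduce_num)
                            return NULL;    /* the early exit added above */
            }
            return NULL;
    }

    int main(void)
    {
            unsigned int num = 1024;
            void *q = alloc_ring(&num, true);

            printf("ring: %u entries (%s)\n", num, q ? "ok" : "failed");
            free(q);
            return 0;
    }
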
1664 +diff --git a/fs/block_dev.c b/fs/block_dev.c
1665 +index 3911c1a80219..61949e3446e5 100644
1666 +--- a/fs/block_dev.c
1667 ++++ b/fs/block_dev.c
1668 +@@ -306,10 +306,10 @@ static void blkdev_bio_end_io(struct bio *bio)
1669 + struct blkdev_dio *dio = bio->bi_private;
1670 + bool should_dirty = dio->should_dirty;
1671 +
1672 +- if (dio->multi_bio && !atomic_dec_and_test(&dio->ref)) {
1673 +- if (bio->bi_status && !dio->bio.bi_status)
1674 +- dio->bio.bi_status = bio->bi_status;
1675 +- } else {
1676 ++ if (bio->bi_status && !dio->bio.bi_status)
1677 ++ dio->bio.bi_status = bio->bi_status;
1678 ++
1679 ++ if (!dio->multi_bio || atomic_dec_and_test(&dio->ref)) {
1680 + if (!dio->is_sync) {
1681 + struct kiocb *iocb = dio->iocb;
1682 + ssize_t ret;
1683 +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
1684 +index cddd63b9103f..dd3b4820ac30 100644
1685 +--- a/fs/btrfs/ioctl.c
1686 ++++ b/fs/btrfs/ioctl.c
1687 +@@ -357,6 +357,16 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
1688 + if (!capable(CAP_SYS_ADMIN))
1689 + return -EPERM;
1690 +
1691 ++ /*
1692 ++ * If the fs is mounted with nologreplay, which requires it to be
1693 ++ * mounted in RO mode as well, we can not allow discard on free space
1694 ++ * inside block groups, because log trees refer to extents that are not
1695 ++ * pinned in a block group's free space cache (pinning the extents is
1696 ++ * precisely the first phase of replaying a log tree).
1697 ++ */
1698 ++ if (btrfs_test_opt(fs_info, NOLOGREPLAY))
1699 ++ return -EROFS;
1700 ++
1701 + rcu_read_lock();
1702 + list_for_each_entry_rcu(device, &fs_info->fs_devices->devices,
1703 + dev_list) {
1704 +diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c
1705 +index cbabc6f2b322..266f9069307b 100644
1706 +--- a/fs/btrfs/props.c
1707 ++++ b/fs/btrfs/props.c
1708 +@@ -386,11 +386,11 @@ int btrfs_subvol_inherit_props(struct btrfs_trans_handle *trans,
1709 +
1710 + static int prop_compression_validate(const char *value, size_t len)
1711 + {
1712 +- if (!strncmp("lzo", value, len))
1713 ++ if (!strncmp("lzo", value, 3))
1714 + return 0;
1715 +- else if (!strncmp("zlib", value, len))
1716 ++ else if (!strncmp("zlib", value, 4))
1717 + return 0;
1718 +- else if (!strncmp("zstd", value, len))
1719 ++ else if (!strncmp("zstd", value, 4))
1720 + return 0;
1721 +
1722 + return -EINVAL;
1723 +@@ -416,7 +416,7 @@ static int prop_compression_apply(struct inode *inode,
1724 + btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
1725 + } else if (!strncmp("zlib", value, 4)) {
1726 + type = BTRFS_COMPRESS_ZLIB;
1727 +- } else if (!strncmp("zstd", value, len)) {
1728 ++ } else if (!strncmp("zstd", value, 4)) {
1729 + type = BTRFS_COMPRESS_ZSTD;
1730 + btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD);
1731 + } else {
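
Background on the props.c fix: prop_compression_validate() bounded strncmp() by the caller-supplied length, so a one-byte value "z" matched both "zlib" and "zstd", while prop_compression_apply() still used len for "zstd" and could mis-select the type. The patch pins each comparison to the keyword length; below is a userspace demo of the bug class, plus a defensive variant that additionally requires the input to cover the keyword (that extra length check is my addition, not in the patch):

    #include <stdio.h>
    #include <string.h>

    /* Bug class: comparison bounded by the *user's* length accepts any
     * prefix of the keyword. */
    static int validate_buggy(const char *value, size_t len)
    {
            return strncmp("zlib", value, len) == 0;
    }

    static int validate_fixed(const char *value, size_t len)
    {
            return len >= 4 && strncmp("zlib", value, 4) == 0;
    }

    int main(void)
    {
            printf("buggy(\"z\", 1)    = %d\n", validate_buggy("z", 1));    /* 1 */
            printf("fixed(\"z\", 1)    = %d\n", validate_fixed("z", 1));    /* 0 */
            printf("fixed(\"zlib\", 4) = %d\n", validate_fixed("zlib", 4)); /* 1 */
            return 0;
    }
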
1732 +diff --git a/include/linux/bitrev.h b/include/linux/bitrev.h
1733 +index 50fb0dee23e8..d35b8ec1c485 100644
1734 +--- a/include/linux/bitrev.h
1735 ++++ b/include/linux/bitrev.h
1736 +@@ -34,41 +34,41 @@ static inline u32 __bitrev32(u32 x)
1737 +
1738 + #define __constant_bitrev32(x) \
1739 + ({ \
1740 +- u32 __x = x; \
1741 +- __x = (__x >> 16) | (__x << 16); \
1742 +- __x = ((__x & (u32)0xFF00FF00UL) >> 8) | ((__x & (u32)0x00FF00FFUL) << 8); \
1743 +- __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4); \
1744 +- __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2); \
1745 +- __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1); \
1746 +- __x; \
1747 ++ u32 ___x = x; \
1748 ++ ___x = (___x >> 16) | (___x << 16); \
1749 ++ ___x = ((___x & (u32)0xFF00FF00UL) >> 8) | ((___x & (u32)0x00FF00FFUL) << 8); \
1750 ++ ___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4); \
1751 ++ ___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2); \
1752 ++ ___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1); \
1753 ++ ___x; \
1754 + })
1755 +
1756 + #define __constant_bitrev16(x) \
1757 + ({ \
1758 +- u16 __x = x; \
1759 +- __x = (__x >> 8) | (__x << 8); \
1760 +- __x = ((__x & (u16)0xF0F0U) >> 4) | ((__x & (u16)0x0F0FU) << 4); \
1761 +- __x = ((__x & (u16)0xCCCCU) >> 2) | ((__x & (u16)0x3333U) << 2); \
1762 +- __x = ((__x & (u16)0xAAAAU) >> 1) | ((__x & (u16)0x5555U) << 1); \
1763 +- __x; \
1764 ++ u16 ___x = x; \
1765 ++ ___x = (___x >> 8) | (___x << 8); \
1766 ++ ___x = ((___x & (u16)0xF0F0U) >> 4) | ((___x & (u16)0x0F0FU) << 4); \
1767 ++ ___x = ((___x & (u16)0xCCCCU) >> 2) | ((___x & (u16)0x3333U) << 2); \
1768 ++ ___x = ((___x & (u16)0xAAAAU) >> 1) | ((___x & (u16)0x5555U) << 1); \
1769 ++ ___x; \
1770 + })
1771 +
1772 + #define __constant_bitrev8x4(x) \
1773 + ({ \
1774 +- u32 __x = x; \
1775 +- __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4); \
1776 +- __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2); \
1777 +- __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1); \
1778 +- __x; \
1779 ++ u32 ___x = x; \
1780 ++ ___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4); \
1781 ++ ___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2); \
1782 ++ ___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1); \
1783 ++ ___x; \
1784 + })
1785 +
1786 + #define __constant_bitrev8(x) \
1787 + ({ \
1788 +- u8 __x = x; \
1789 +- __x = (__x >> 4) | (__x << 4); \
1790 +- __x = ((__x & (u8)0xCCU) >> 2) | ((__x & (u8)0x33U) << 2); \
1791 +- __x = ((__x & (u8)0xAAU) >> 1) | ((__x & (u8)0x55U) << 1); \
1792 +- __x; \
1793 ++ u8 ___x = x; \
1794 ++ ___x = (___x >> 4) | (___x << 4); \
1795 ++ ___x = ((___x & (u8)0xCCU) >> 2) | ((___x & (u8)0x33U) << 2); \
1796 ++ ___x = ((___x & (u8)0xAAU) >> 1) | ((___x & (u8)0x55U) << 1); \
1797 ++ ___x; \
1798 + })
1799 +
1800 + #define bitrev32(x) \
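
Why the __x to ___x rename above is not cosmetic: the bitrev32()/bitrev16()/bitrev8() wrapper macros (the first is cut off at the end of this hunk) declare their own temporary __x and pass it into the __constant_* variants. With identical names, the nested expansion contains "u32 __x = __x;", a self-initialization that reads an indeterminate value. A compilable GNU C sketch of the failure mode (macro names invented; the bad variant's output is undefined, so it typically prints garbage):

    #include <stdio.h>

    typedef unsigned int u32;

    /* Inner helpers in the style of __constant_bitrev32(). */
    #define REV16_BAD(x) ({ u32 __x = (x); (u32)((__x >> 16) | (__x << 16)); })
    #define REV16_OK(x)  ({ u32 ___x = (x); (u32)((___x >> 16) | (___x << 16)); })

    /* Wrappers in the style of bitrev32(), which pass their own __x. */
    #define WRAP_BAD(x) ({ u32 __x = (x); REV16_BAD(__x); })  /* u32 __x = __x; */
    #define WRAP_OK(x)  ({ u32 __x = (x); REV16_OK(__x); })

    int main(void)
    {
            printf("bad: %08x\n", WRAP_BAD(0x12345678u));    /* indeterminate */
            printf("ok : %08x\n", WRAP_OK(0x12345678u));     /* 56781234 */
            return 0;
    }
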
1801 +diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
1802 +index 88f0c530fe9c..32d445315128 100644
1803 +--- a/include/linux/mlx5/driver.h
1804 ++++ b/include/linux/mlx5/driver.h
1805 +@@ -743,6 +743,8 @@ struct mlx5_pagefault {
1806 + };
1807 +
1808 + struct mlx5_td {
1809 ++ /* protects tirs list changes while tirs refresh */
1810 ++ struct mutex list_lock;
1811 + struct list_head tirs_list;
1812 + u32 tdn;
1813 + };
1814 +diff --git a/include/linux/string.h b/include/linux/string.h
1815 +index 96115bf561b4..3d43329c20be 100644
1816 +--- a/include/linux/string.h
1817 ++++ b/include/linux/string.h
1818 +@@ -142,6 +142,9 @@ extern void * memscan(void *,int,__kernel_size_t);
1819 + #ifndef __HAVE_ARCH_MEMCMP
1820 + extern int memcmp(const void *,const void *,__kernel_size_t);
1821 + #endif
1822 ++#ifndef __HAVE_ARCH_BCMP
1823 ++extern int bcmp(const void *,const void *,__kernel_size_t);
1824 ++#endif
1825 + #ifndef __HAVE_ARCH_MEMCHR
1826 + extern void * memchr(const void *,int,__kernel_size_t);
1827 + #endif
1828 +diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
1829 +index bbf32524ab27..75007e648dfa 100644
1830 +--- a/include/linux/virtio_ring.h
1831 ++++ b/include/linux/virtio_ring.h
1832 +@@ -63,7 +63,7 @@ struct virtqueue;
1833 + /*
1834 + * Creates a virtqueue and allocates the descriptor ring. If
1835 + * may_reduce_num is set, then this may allocate a smaller ring than
1836 +- * expected. The caller should query virtqueue_get_ring_size to learn
1837 ++ * expected. The caller should query virtqueue_get_vring_size to learn
1838 + * the actual size of the ring.
1839 + */
1840 + struct virtqueue *vring_create_virtqueue(unsigned int index,
1841 +diff --git a/include/net/ip.h b/include/net/ip.h
1842 +index 80575db4e304..b8ebee43941f 100644
1843 +--- a/include/net/ip.h
1844 ++++ b/include/net/ip.h
1845 +@@ -603,7 +603,7 @@ int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
1846 + unsigned char __user *data, int optlen);
1847 + void ip_options_undo(struct ip_options *opt);
1848 + void ip_forward_options(struct sk_buff *skb);
1849 +-int ip_options_rcv_srr(struct sk_buff *skb);
1850 ++int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);
1851 +
1852 + /*
1853 + * Functions provided by ip_sockglue.c
1854 +diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
1855 +index f4bf75fac349..d96c9d9cca96 100644
1856 +--- a/include/net/net_namespace.h
1857 ++++ b/include/net/net_namespace.h
1858 +@@ -56,6 +56,7 @@ struct net {
1859 + */
1860 + spinlock_t rules_mod_lock;
1861 +
1862 ++ u32 hash_mix;
1863 + atomic64_t cookie_gen;
1864 +
1865 + struct list_head list; /* list of network namespaces */
1866 +diff --git a/include/net/netns/hash.h b/include/net/netns/hash.h
1867 +index 24c78183a4c2..d9b665151f3d 100644
1868 +--- a/include/net/netns/hash.h
1869 ++++ b/include/net/netns/hash.h
1870 +@@ -2,21 +2,10 @@
1871 + #ifndef __NET_NS_HASH_H__
1872 + #define __NET_NS_HASH_H__
1873 +
1874 +-#include <asm/cache.h>
1875 +-
1876 +-struct net;
1877 ++#include <net/net_namespace.h>
1878 +
1879 + static inline u32 net_hash_mix(const struct net *net)
1880 + {
1881 +-#ifdef CONFIG_NET_NS
1882 +- /*
1883 +- * shift this right to eliminate bits, that are
1884 +- * always zeroed
1885 +- */
1886 +-
1887 +- return (u32)(((unsigned long)net) >> L1_CACHE_SHIFT);
1888 +-#else
1889 +- return 0;
1890 +-#endif
1891 ++ return net->hash_mix;
1892 + }
1893 + #endif
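
The net_hash_mix() change swaps a pointer-derived value, which leaked kernel address bits into hash bucket layouts and made cross-namespace hash collisions predictable, for per-namespace random bytes drawn once in setup_net() (see the net_namespace.c hunk further down). A userspace sketch of the same pattern (assumes getrandom(2) via glibc 2.25+; the xor-mod hash is a stand-in for the kernel's jhash users):

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/types.h>
    #include <sys/random.h>

    struct netns_like {
            unsigned int hash_mix;    /* mirrors the new net->hash_mix */
    };

    static void setup(struct netns_like *ns)
    {
            /* One random draw per namespace, like setup_net() does. */
            if (getrandom(&ns->hash_mix, sizeof(ns->hash_mix), 0) !=
                (ssize_t)sizeof(ns->hash_mix))
                    abort();
    }

    static unsigned int bucket(const struct netns_like *ns,
                               unsigned int key, unsigned int nbuckets)
    {
            /* The salt decouples bucket layout from kernel addresses. */
            return (key ^ ns->hash_mix) % nbuckets;
    }

    int main(void)
    {
            struct netns_like a, b;

            setup(&a);
            setup(&b);
            printf("key 42: ns a -> %u, ns b -> %u\n",
                   bucket(&a, 42, 256), bucket(&b, 42, 256));
            return 0;
    }
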
1894 +diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
1895 +index 0fa7ef74303b..317fc759de76 100644
1896 +--- a/kernel/irq/chip.c
1897 ++++ b/kernel/irq/chip.c
1898 +@@ -1363,6 +1363,10 @@ int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
1899 + int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
1900 + {
1901 + data = data->parent_data;
1902 ++
1903 ++ if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE)
1904 ++ return 0;
1905 ++
1906 + if (data->chip->irq_set_wake)
1907 + return data->chip->irq_set_wake(data, on);
1908 +
1909 +diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
1910 +index c2bfb11a9d05..aa08d4184608 100644
1911 +--- a/kernel/irq/irqdesc.c
1912 ++++ b/kernel/irq/irqdesc.c
1913 +@@ -535,6 +535,7 @@ int __init early_irq_init(void)
1914 + alloc_masks(&desc[i], node);
1915 + raw_spin_lock_init(&desc[i].lock);
1916 + lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
1917 ++ mutex_init(&desc[i].request_mutex);
1918 + desc_set_defaults(i, &desc[i], node, NULL, NULL);
1919 + }
1920 + return arch_early_irq_init();
1921 +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
1922 +index 4d54c1fe9623..9829ede00498 100644
1923 +--- a/kernel/sched/fair.c
1924 ++++ b/kernel/sched/fair.c
1925 +@@ -7018,10 +7018,10 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
1926 + if (cfs_rq->last_h_load_update == now)
1927 + return;
1928 +
1929 +- cfs_rq->h_load_next = NULL;
1930 ++ WRITE_ONCE(cfs_rq->h_load_next, NULL);
1931 + for_each_sched_entity(se) {
1932 + cfs_rq = cfs_rq_of(se);
1933 +- cfs_rq->h_load_next = se;
1934 ++ WRITE_ONCE(cfs_rq->h_load_next, se);
1935 + if (cfs_rq->last_h_load_update == now)
1936 + break;
1937 + }
1938 +@@ -7031,7 +7031,7 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
1939 + cfs_rq->last_h_load_update = now;
1940 + }
1941 +
1942 +- while ((se = cfs_rq->h_load_next) != NULL) {
1943 ++ while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) {
1944 + load = cfs_rq->h_load;
1945 + load = div64_ul(load * se->avg.load_avg,
1946 + cfs_rq_load_avg(cfs_rq) + 1);
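
The fair.c hunk annotates a pointer chain that one CPU writes while another walks it locklessly; plain accesses let the compiler tear, refetch, or duplicate the loads and stores. A minimal model of what READ_ONCE/WRITE_ONCE provide, simplified from the kernel's volatile-cast definitions (this models only single, untorn accesses, not memory ordering):

    #include <stdio.h>
    #include <stddef.h>

    #define WRITE_ONCE(var, val) (*(volatile __typeof__(var) *)&(var) = (val))
    #define READ_ONCE(var)       (*(volatile __typeof__(var) *)&(var))

    struct node { struct node *next; int load; };

    static struct node *chain;    /* one thread publishes, another walks */

    static void publish(struct node *n)
    {
            WRITE_ONCE(chain, n);    /* exactly one store, never torn */
    }

    static int walk(void)
    {
            int sum = 0;
            struct node *n;

            for (n = READ_ONCE(chain); n; n = READ_ONCE(n->next))
                    sum += n->load;    /* each pointer loaded exactly once */
            return sum;
    }

    int main(void)
    {
            struct node b = { NULL, 2 };
            struct node a = { &b, 1 };

            publish(&a);
            printf("sum = %d\n", walk());    /* 3 */
            return 0;
    }
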
1947 +diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
1948 +index fa5de5e8de61..fdeb9bc6affb 100644
1949 +--- a/kernel/time/alarmtimer.c
1950 ++++ b/kernel/time/alarmtimer.c
1951 +@@ -597,7 +597,7 @@ static ktime_t alarm_timer_remaining(struct k_itimer *timr, ktime_t now)
1952 + {
1953 + struct alarm *alarm = &timr->it.alarm.alarmtimer;
1954 +
1955 +- return ktime_sub(now, alarm->node.expires);
1956 ++ return ktime_sub(alarm->node.expires, now);
1957 + }
1958 +
1959 + /**
1960 +diff --git a/lib/string.c b/lib/string.c
1961 +index 5e8d410a93df..1530643edf00 100644
1962 +--- a/lib/string.c
1963 ++++ b/lib/string.c
1964 +@@ -865,6 +865,26 @@ __visible int memcmp(const void *cs, const void *ct, size_t count)
1965 + EXPORT_SYMBOL(memcmp);
1966 + #endif
1967 +
1968 ++#ifndef __HAVE_ARCH_BCMP
1969 ++/**
1970 ++ * bcmp - returns 0 if and only if the buffers have identical contents.
1971 ++ * @a: pointer to first buffer.
1972 ++ * @b: pointer to second buffer.
1973 ++ * @len: size of buffers.
1974 ++ *
1975 ++ * The sign or magnitude of a non-zero return value has no particular
1976 ++ * meaning, and architectures may implement their own more efficient bcmp(). So
1977 ++ * while this particular implementation is a simple (tail) call to memcmp, do
1978 ++ * not rely on anything but whether the return value is zero or non-zero.
1979 ++ */
1980 ++#undef bcmp
1981 ++int bcmp(const void *a, const void *b, size_t len)
1982 ++{
1983 ++ return memcmp(a, b, len);
1984 ++}
1985 ++EXPORT_SYMBOL(bcmp);
1986 ++#endif
1987 ++
1988 + #ifndef __HAVE_ARCH_MEMSCAN
1989 + /**
1990 + * memscan - Find a character in an area of memory.
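
Usage note for the new bcmp(): as the kerneldoc above says, only zero versus non-zero is meaningful. A userspace sketch of the contract (bcmp() is a legacy interface that glibc still declares in <strings.h>):

    #include <stdio.h>
    #include <strings.h>

    int main(void)
    {
            const char a[] = "abc", b[] = "abd";

            if (bcmp(a, b, 3) == 0)
                    puts("buffers identical");
            else
                    puts("buffers differ");  /* never inspect sign/magnitude */
            return 0;
    }
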
1991 +diff --git a/net/core/ethtool.c b/net/core/ethtool.c
1992 +index 3469f5053c79..145cb343c1b0 100644
1993 +--- a/net/core/ethtool.c
1994 ++++ b/net/core/ethtool.c
1995 +@@ -1815,11 +1815,15 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
1996 + WARN_ON_ONCE(!ret);
1997 +
1998 + gstrings.len = ret;
1999 +- data = vzalloc(gstrings.len * ETH_GSTRING_LEN);
2000 +- if (gstrings.len && !data)
2001 +- return -ENOMEM;
2002 ++ if (gstrings.len) {
2003 ++ data = vzalloc(gstrings.len * ETH_GSTRING_LEN);
2004 ++ if (!data)
2005 ++ return -ENOMEM;
2006 +
2007 +- __ethtool_get_strings(dev, gstrings.string_set, data);
2008 ++ __ethtool_get_strings(dev, gstrings.string_set, data);
2009 ++ } else {
2010 ++ data = NULL;
2011 ++ }
2012 +
2013 + ret = -EFAULT;
2014 + if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
2015 +@@ -1915,11 +1919,14 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
2016 + return -EFAULT;
2017 +
2018 + stats.n_stats = n_stats;
2019 +- data = vzalloc(n_stats * sizeof(u64));
2020 +- if (n_stats && !data)
2021 +- return -ENOMEM;
2022 +-
2023 +- ops->get_ethtool_stats(dev, &stats, data);
2024 ++ if (n_stats) {
2025 ++ data = vzalloc(n_stats * sizeof(u64));
2026 ++ if (!data)
2027 ++ return -ENOMEM;
2028 ++ ops->get_ethtool_stats(dev, &stats, data);
2029 ++ } else {
2030 ++ data = NULL;
2031 ++ }
2032 +
2033 + ret = -EFAULT;
2034 + if (copy_to_user(useraddr, &stats, sizeof(stats)))
2035 +@@ -1955,13 +1962,17 @@ static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr)
2036 + return -EFAULT;
2037 +
2038 + stats.n_stats = n_stats;
2039 +- data = vzalloc(n_stats * sizeof(u64));
2040 +- if (n_stats && !data)
2041 +- return -ENOMEM;
2042 ++ if (n_stats) {
2043 ++ data = vzalloc(n_stats * sizeof(u64));
2044 ++ if (!data)
2045 ++ return -ENOMEM;
2046 +
2047 +- mutex_lock(&phydev->lock);
2048 +- phydev->drv->get_stats(phydev, &stats, data);
2049 +- mutex_unlock(&phydev->lock);
2050 ++ mutex_lock(&phydev->lock);
2051 ++ phydev->drv->get_stats(phydev, &stats, data);
2052 ++ mutex_unlock(&phydev->lock);
2053 ++ } else {
2054 ++ data = NULL;
2055 ++ }
2056 +
2057 + ret = -EFAULT;
2058 + if (copy_to_user(useraddr, &stats, sizeof(stats)))
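
All three ethtool hunks converge on one pattern: when the element count is zero, skip the allocation and hand a NULL buffer onward, reserving ENOMEM for real failures (a zero-byte vzalloc() can yield NULL, which the old code misread as out-of-memory). A standalone sketch of the pattern (calloc stands in for vzalloc; all names invented):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int fetch_stats(size_t n_stats, unsigned long long **out)
    {
            unsigned long long *data = NULL;

            if (n_stats) {
                    data = calloc(n_stats, sizeof(*data));
                    if (!data)
                            return -ENOMEM;    /* a real failure */
                    /* ... fill data[0..n_stats-1] here ... */
            }
            *out = data;    /* NULL is the right answer for n_stats == 0 */
            return 0;
    }

    int main(void)
    {
            unsigned long long *stats = NULL;

            if (fetch_stats(0, &stats) == 0)
                    printf("n_stats=0 -> data=%p, no spurious ENOMEM\n",
                           (void *)stats);
            free(stats);
            return 0;
    }
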
2059 +diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
2060 +index 0dd6359e5924..60b88718b1d4 100644
2061 +--- a/net/core/net_namespace.c
2062 ++++ b/net/core/net_namespace.c
2063 +@@ -285,6 +285,7 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
2064 +
2065 + atomic_set(&net->count, 1);
2066 + refcount_set(&net->passive, 1);
2067 ++ get_random_bytes(&net->hash_mix, sizeof(u32));
2068 + net->dev_base_seq = 1;
2069 + net->user_ns = user_ns;
2070 + idr_init(&net->netns_ids);
2071 +diff --git a/net/core/skbuff.c b/net/core/skbuff.c
2072 +index 1b39aef5cf82..2b3b0307dd89 100644
2073 +--- a/net/core/skbuff.c
2074 ++++ b/net/core/skbuff.c
2075 +@@ -3808,7 +3808,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2076 + struct sk_buff *lp, *p = *head;
2077 + unsigned int delta_truesize;
2078 +
2079 +- if (unlikely(p->len + len >= 65536))
2080 ++ if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush))
2081 + return -E2BIG;
2082 +
2083 + lp = NAPI_GRO_CB(p)->last;
2084 +diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
2085 +index 1b160378ea9c..6fc45d3a1f8a 100644
2086 +--- a/net/ipv4/ip_input.c
2087 ++++ b/net/ipv4/ip_input.c
2088 +@@ -259,11 +259,10 @@ int ip_local_deliver(struct sk_buff *skb)
2089 + ip_local_deliver_finish);
2090 + }
2091 +
2092 +-static inline bool ip_rcv_options(struct sk_buff *skb)
2093 ++static inline bool ip_rcv_options(struct sk_buff *skb, struct net_device *dev)
2094 + {
2095 + struct ip_options *opt;
2096 + const struct iphdr *iph;
2097 +- struct net_device *dev = skb->dev;
2098 +
2099 + /* It looks as overkill, because not all
2100 + IP options require packet mangling.
2101 +@@ -299,7 +298,7 @@ static inline bool ip_rcv_options(struct sk_buff *skb)
2102 + }
2103 + }
2104 +
2105 +- if (ip_options_rcv_srr(skb))
2106 ++ if (ip_options_rcv_srr(skb, dev))
2107 + goto drop;
2108 + }
2109 +
2110 +@@ -362,7 +361,7 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
2111 + }
2112 + #endif
2113 +
2114 +- if (iph->ihl > 5 && ip_rcv_options(skb))
2115 ++ if (iph->ihl > 5 && ip_rcv_options(skb, dev))
2116 + goto drop;
2117 +
2118 + rt = skb_rtable(skb);
2119 +diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
2120 +index 32a35043c9f5..3db31bb9df50 100644
2121 +--- a/net/ipv4/ip_options.c
2122 ++++ b/net/ipv4/ip_options.c
2123 +@@ -612,7 +612,7 @@ void ip_forward_options(struct sk_buff *skb)
2124 + }
2125 + }
2126 +
2127 +-int ip_options_rcv_srr(struct sk_buff *skb)
2128 ++int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev)
2129 + {
2130 + struct ip_options *opt = &(IPCB(skb)->opt);
2131 + int srrspace, srrptr;
2132 +@@ -647,7 +647,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
2133 +
2134 + orefdst = skb->_skb_refdst;
2135 + skb_dst_set(skb, NULL);
2136 +- err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, skb->dev);
2137 ++ err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, dev);
2138 + rt2 = skb_rtable(skb);
2139 + if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) {
2140 + skb_dst_drop(skb);
2141 +diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
2142 +index 8b637f9f23a2..f0de9fb92f0d 100644
2143 +--- a/net/ipv4/tcp_dctcp.c
2144 ++++ b/net/ipv4/tcp_dctcp.c
2145 +@@ -66,11 +66,6 @@ static unsigned int dctcp_alpha_on_init __read_mostly = DCTCP_MAX_ALPHA;
2146 + module_param(dctcp_alpha_on_init, uint, 0644);
2147 + MODULE_PARM_DESC(dctcp_alpha_on_init, "parameter for initial alpha value");
2148 +
2149 +-static unsigned int dctcp_clamp_alpha_on_loss __read_mostly;
2150 +-module_param(dctcp_clamp_alpha_on_loss, uint, 0644);
2151 +-MODULE_PARM_DESC(dctcp_clamp_alpha_on_loss,
2152 +- "parameter for clamping alpha on loss");
2153 +-
2154 + static struct tcp_congestion_ops dctcp_reno;
2155 +
2156 + static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca)
2157 +@@ -211,21 +206,23 @@ static void dctcp_update_alpha(struct sock *sk, u32 flags)
2158 + }
2159 + }
2160 +
2161 +-static void dctcp_state(struct sock *sk, u8 new_state)
2162 ++static void dctcp_react_to_loss(struct sock *sk)
2163 + {
2164 +- if (dctcp_clamp_alpha_on_loss && new_state == TCP_CA_Loss) {
2165 +- struct dctcp *ca = inet_csk_ca(sk);
2166 ++ struct dctcp *ca = inet_csk_ca(sk);
2167 ++ struct tcp_sock *tp = tcp_sk(sk);
2168 +
2169 +- /* If this extension is enabled, we clamp dctcp_alpha to
2170 +- * max on packet loss; the motivation is that dctcp_alpha
2171 +- * is an indicator to the extend of congestion and packet
2172 +- * loss is an indicator of extreme congestion; setting
2173 +- * this in practice turned out to be beneficial, and
2174 +- * effectively assumes total congestion which reduces the
2175 +- * window by half.
2176 +- */
2177 +- ca->dctcp_alpha = DCTCP_MAX_ALPHA;
2178 +- }
2179 ++ ca->loss_cwnd = tp->snd_cwnd;
2180 ++ tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U);
2181 ++}
2182 ++
2183 ++static void dctcp_state(struct sock *sk, u8 new_state)
2184 ++{
2185 ++ if (new_state == TCP_CA_Recovery &&
2186 ++ new_state != inet_csk(sk)->icsk_ca_state)
2187 ++ dctcp_react_to_loss(sk);
2188 ++ /* We handle RTO in dctcp_cwnd_event to ensure that we perform only
2189 ++ * one loss-adjustment per RTT.
2190 ++ */
2191 + }
2192 +
2193 + static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
2194 +@@ -237,6 +234,9 @@ static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
2195 + case CA_EVENT_ECN_NO_CE:
2196 + dctcp_ce_state_1_to_0(sk);
2197 + break;
2198 ++ case CA_EVENT_LOSS:
2199 ++ dctcp_react_to_loss(sk);
2200 ++ break;
2201 + default:
2202 + /* Don't care for the rest. */
2203 + break;
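
The arithmetic inside dctcp_react_to_loss() above is the classic multiplicative decrease: remember the window at loss time, then halve ssthresh with a floor of two segments. A worked example of that calculation (the window value is invented):

    #include <stdio.h>

    int main(void)
    {
            unsigned int snd_cwnd = 37;           /* example window */
            unsigned int loss_cwnd = snd_cwnd;    /* remembered for undo */
            unsigned int half = snd_cwnd >> 1;
            unsigned int snd_ssthresh = half > 2 ? half : 2;  /* max(cwnd/2, 2) */

            printf("loss_cwnd=%u ssthresh=%u\n", loss_cwnd, snd_ssthresh);
            return 0;
    }
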
2204 +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
2205 +index 7ca8264cbdf9..2af849ba33c9 100644
2206 +--- a/net/ipv6/ip6_output.c
2207 ++++ b/net/ipv6/ip6_output.c
2208 +@@ -611,7 +611,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
2209 + inet6_sk(skb->sk) : NULL;
2210 + struct ipv6hdr *tmp_hdr;
2211 + struct frag_hdr *fh;
2212 +- unsigned int mtu, hlen, left, len;
2213 ++ unsigned int mtu, hlen, left, len, nexthdr_offset;
2214 + int hroom, troom;
2215 + __be32 frag_id;
2216 + int ptr, offset = 0, err = 0;
2217 +@@ -622,6 +622,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
2218 + goto fail;
2219 + hlen = err;
2220 + nexthdr = *prevhdr;
2221 ++ nexthdr_offset = prevhdr - skb_network_header(skb);
2222 +
2223 + mtu = ip6_skb_dst_mtu(skb);
2224 +
2225 +@@ -656,6 +657,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
2226 + (err = skb_checksum_help(skb)))
2227 + goto fail;
2228 +
2229 ++ prevhdr = skb_network_header(skb) + nexthdr_offset;
2230 + hroom = LL_RESERVED_SPACE(rt->dst.dev);
2231 + if (skb_has_frag_list(skb)) {
2232 + unsigned int first_len = skb_pagelen(skb);
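
The ip6_fragment() fix is a stale-pointer repair: skb_checksum_help() may reallocate the header area, so prevhdr, which pointed into the old buffer, must be recomputed as an offset against the new skb_network_header(). The same pattern in plain C with realloc (all names invented):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
            char *buf = malloc(16);
            char *field, *nbuf;
            size_t field_off;

            if (!buf)
                    return 1;
            strcpy(buf, "hdr:payload");

            field = buf + 4;             /* like prevhdr: points into buf */
            field_off = field - buf;     /* like nexthdr_offset: survives */

            nbuf = realloc(buf, 4096);   /* like skb_checksum_help() */
            if (!nbuf) {
                    free(buf);
                    return 1;
            }
            buf = nbuf;
            field = buf + field_off;     /* rebase instead of reusing */

            printf("field = %s\n", field);    /* payload */
            free(buf);
            return 0;
    }
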
2233 +diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
2234 +index 1812c2a748ff..f71c7915ff0e 100644
2235 +--- a/net/ipv6/ip6_tunnel.c
2236 ++++ b/net/ipv6/ip6_tunnel.c
2237 +@@ -633,7 +633,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
2238 + IPPROTO_IPIP,
2239 + RT_TOS(eiph->tos), 0);
2240 + if (IS_ERR(rt) ||
2241 +- rt->dst.dev->type != ARPHRD_TUNNEL) {
2242 ++ rt->dst.dev->type != ARPHRD_TUNNEL6) {
2243 + if (!IS_ERR(rt))
2244 + ip_rt_put(rt);
2245 + goto out;
2246 +@@ -643,7 +643,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
2247 + ip_rt_put(rt);
2248 + if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
2249 + skb2->dev) ||
2250 +- skb_dst(skb2)->dev->type != ARPHRD_TUNNEL)
2251 ++ skb_dst(skb2)->dev->type != ARPHRD_TUNNEL6)
2252 + goto out;
2253 + }
2254 +
2255 +diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
2256 +index e23190725244..f7d080d1cf8e 100644
2257 +--- a/net/ipv6/sit.c
2258 ++++ b/net/ipv6/sit.c
2259 +@@ -661,6 +661,10 @@ static int ipip6_rcv(struct sk_buff *skb)
2260 + !net_eq(tunnel->net, dev_net(tunnel->dev))))
2261 + goto out;
2262 +
2263 ++ /* skb can be uncloned in iptunnel_pull_header, so
2264 ++ * old iph is no longer valid
2265 ++ */
2266 ++ iph = (const struct iphdr *)skb_mac_header(skb);
2267 + err = IP_ECN_decapsulate(iph, skb);
2268 + if (unlikely(err)) {
2269 + if (log_ecn_error)
2270 +diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
2271 +index 9bf997404918..7b4f3f865861 100644
2272 +--- a/net/kcm/kcmsock.c
2273 ++++ b/net/kcm/kcmsock.c
2274 +@@ -2059,14 +2059,14 @@ static int __init kcm_init(void)
2275 + if (err)
2276 + goto fail;
2277 +
2278 +- err = sock_register(&kcm_family_ops);
2279 +- if (err)
2280 +- goto sock_register_fail;
2281 +-
2282 + err = register_pernet_device(&kcm_net_ops);
2283 + if (err)
2284 + goto net_ops_fail;
2285 +
2286 ++ err = sock_register(&kcm_family_ops);
2287 ++ if (err)
2288 ++ goto sock_register_fail;
2289 ++
2290 + err = kcm_proc_init();
2291 + if (err)
2292 + goto proc_init_fail;
2293 +@@ -2074,12 +2074,12 @@ static int __init kcm_init(void)
2294 + return 0;
2295 +
2296 + proc_init_fail:
2297 +- unregister_pernet_device(&kcm_net_ops);
2298 +-
2299 +-net_ops_fail:
2300 + sock_unregister(PF_KCM);
2301 +
2302 + sock_register_fail:
2303 ++ unregister_pernet_device(&kcm_net_ops);
2304 ++
2305 ++net_ops_fail:
2306 + proto_unregister(&kcm_proto);
2307 +
2308 + fail:
2309 +@@ -2095,8 +2095,8 @@ fail:
2310 + static void __exit kcm_exit(void)
2311 + {
2312 + kcm_proc_exit();
2313 +- unregister_pernet_device(&kcm_net_ops);
2314 + sock_unregister(PF_KCM);
2315 ++ unregister_pernet_device(&kcm_net_ops);
2316 + proto_unregister(&kcm_proto);
2317 + destroy_workqueue(kcm_wq);
2318 +
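
The kcm hunks restore init/exit symmetry: setup order is proto, pernet device, socket family, proc, so every error label and the module exit path must unwind in exactly the reverse order. A standalone model of the goto-unwind pattern (steps invented; step C is made to fail so the unwind runs):

    #include <stdio.h>

    static int step_a(void) { puts("A up");   return 0; }
    static int step_b(void) { puts("B up");   return 0; }
    static int step_c(void) { puts("C fail"); return -1; }
    static void undo_b(void) { puts("B down"); }
    static void undo_a(void) { puts("A down"); }

    static int init_all(void)
    {
            int err;

            err = step_a();
            if (err)
                    goto fail;
            err = step_b();
            if (err)
                    goto unwind_a;
            err = step_c();
            if (err)
                    goto unwind_b;    /* reverse of setup: B down, then A */
            return 0;

    unwind_b:
            undo_b();
    unwind_a:
            undo_a();
    fail:
            return err;
    }

    int main(void)
    {
            return init_all() ? 1 : 0;
    }
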
2319 +diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
2320 +index e687b89dafe6..f5deae2ccb79 100644
2321 +--- a/net/openvswitch/flow_netlink.c
2322 ++++ b/net/openvswitch/flow_netlink.c
2323 +@@ -1967,14 +1967,14 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
2324 +
2325 + struct sw_flow_actions *acts;
2326 + int new_acts_size;
2327 +- int req_size = NLA_ALIGN(attr_len);
2328 ++ size_t req_size = NLA_ALIGN(attr_len);
2329 + int next_offset = offsetof(struct sw_flow_actions, actions) +
2330 + (*sfa)->actions_len;
2331 +
2332 + if (req_size <= (ksize(*sfa) - next_offset))
2333 + goto out;
2334 +
2335 +- new_acts_size = ksize(*sfa) * 2;
2336 ++ new_acts_size = max(next_offset + req_size, ksize(*sfa) * 2);
2337 +
2338 + if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
2339 + if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
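
Two things happen in the reserve_sfa_size() fix: req_size becomes size_t so a huge attr_len cannot wrap through NLA_ALIGN(), and the growth policy becomes max(needed, double), since doubling ksize() alone may still be too small for one large attribute. The resize policy as a standalone sketch (names invented):

    #include <stdio.h>
    #include <stdlib.h>

    struct actbuf { size_t cap, used; unsigned char *data; };

    static int reserve(struct actbuf *b, size_t req)
    {
            size_t need, newcap;
            unsigned char *p;

            if (req <= b->cap - b->used)
                    return 0;
            need = b->used + req;
            newcap = b->cap * 2 > need ? b->cap * 2 : need;    /* max() */
            p = realloc(b->data, newcap);
            if (!p)
                    return -1;
            b->data = p;
            b->cap = newcap;
            return 0;
    }

    int main(void)
    {
            struct actbuf b = { 16, 0, NULL };

            b.data = malloc(b.cap);
            if (!b.data)
                    return 1;
            /* A request far beyond 2*cap still fits in a single resize. */
            if (reserve(&b, 1000) == 0)
                    printf("cap grew to %zu\n", b.cap);    /* 1000, not 32 */
            free(b.data);
            return 0;
    }
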
2340 +diff --git a/net/rds/tcp.c b/net/rds/tcp.c
2341 +index 2a08bf75d008..82e9ffecd90e 100644
2342 +--- a/net/rds/tcp.c
2343 ++++ b/net/rds/tcp.c
2344 +@@ -530,7 +530,7 @@ static void rds_tcp_kill_sock(struct net *net)
2345 + list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
2346 + struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
2347 +
2348 +- if (net != c_net || !tc->t_sock)
2349 ++ if (net != c_net)
2350 + continue;
2351 + if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) {
2352 + list_move_tail(&tc->t_tcp_node, &tmp_list);
2353 +diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
2354 +index a859b55d7899..64fd1e9818a6 100644
2355 +--- a/net/sched/act_sample.c
2356 ++++ b/net/sched/act_sample.c
2357 +@@ -45,6 +45,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
2358 + struct tc_sample *parm;
2359 + struct tcf_sample *s;
2360 + bool exists = false;
2361 ++ u32 rate;
2362 + int ret;
2363 +
2364 + if (!nla)
2365 +@@ -73,10 +74,17 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
2366 + if (!ovr)
2367 + return -EEXIST;
2368 + }
2369 +- s = to_sample(*a);
2370 +
2371 ++ rate = nla_get_u32(tb[TCA_SAMPLE_RATE]);
2372 ++ if (!rate) {
2373 ++ tcf_idr_release(*a, bind);
2374 ++ return -EINVAL;
2375 ++ }
2376 ++
2377 ++ s = to_sample(*a);
2378 + s->tcf_action = parm->action;
2379 + s->rate = nla_get_u32(tb[TCA_SAMPLE_RATE]);
2380 ++ s->rate = rate;
2381 + s->psample_group_num = nla_get_u32(tb[TCA_SAMPLE_PSAMPLE_GROUP]);
2382 + psample_group = psample_group_get(net, s->psample_group_num);
2383 + if (!psample_group) {
2384 +diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
2385 +index 6499aecfbfc4..d8fd152779c8 100644
2386 +--- a/net/sched/cls_matchall.c
2387 ++++ b/net/sched/cls_matchall.c
2388 +@@ -125,6 +125,11 @@ static void mall_destroy(struct tcf_proto *tp)
2389 +
2390 + static void *mall_get(struct tcf_proto *tp, u32 handle)
2391 + {
2392 ++ struct cls_mall_head *head = rtnl_dereference(tp->root);
2393 ++
2394 ++ if (head && head->handle == handle)
2395 ++ return head;
2396 ++
2397 + return NULL;
2398 + }
2399 +
2400 +diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
2401 +index cbb04d66f564..a7529aca2ac8 100644
2402 +--- a/net/sctp/protocol.c
2403 ++++ b/net/sctp/protocol.c
2404 +@@ -605,6 +605,7 @@ out:
2405 + static int sctp_v4_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr)
2406 + {
2407 + /* No address mapping for V4 sockets */
2408 ++ memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
2409 + return sizeof(struct sockaddr_in);
2410 + }
2411 +
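
The sctp one-liner is an infoleak fix: sockaddr_in's sin_zero padding was copied back to userspace without ever being written, exposing stale kernel stack bytes. The general rule, zero any struct that crosses a trust boundary before filling only the fields you mean to set, in a userspace sketch:

    #include <netinet/in.h>
    #include <arpa/inet.h>
    #include <stdio.h>
    #include <string.h>

    static void fill_addr(struct sockaddr_in *sin)
    {
            /* Zero everything first: sin_zero (8 pad bytes) is never
             * assigned below and would otherwise carry stack contents. */
            memset(sin, 0, sizeof(*sin));
            sin->sin_family = AF_INET;
            sin->sin_port = htons(8080);
            sin->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
    }

    int main(void)
    {
            struct sockaddr_in sin;
            static const char zeros[8];

            fill_addr(&sin);
            printf("sin_zero clean: %s\n",
                   memcmp(sin.sin_zero, zeros, sizeof(zeros)) ? "no" : "yes");
            return 0;
    }
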
2412 +diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
2413 +index 350c33ec82b3..3bcd7a2f0394 100644
2414 +--- a/sound/core/seq/seq_clientmgr.c
2415 ++++ b/sound/core/seq/seq_clientmgr.c
2416 +@@ -1249,7 +1249,7 @@ static int snd_seq_ioctl_set_client_info(struct snd_seq_client *client,
2417 +
2418 + /* fill the info fields */
2419 + if (client_info->name[0])
2420 +- strlcpy(client->name, client_info->name, sizeof(client->name));
2421 ++ strscpy(client->name, client_info->name, sizeof(client->name));
2422 +
2423 + client->filter = client_info->filter;
2424 + client->event_lost = client_info->event_lost;
2425 +@@ -1527,7 +1527,7 @@ static int snd_seq_ioctl_create_queue(struct snd_seq_client *client, void *arg)
2426 + /* set queue name */
2427 + if (!info->name[0])
2428 + snprintf(info->name, sizeof(info->name), "Queue-%d", q->queue);
2429 +- strlcpy(q->name, info->name, sizeof(q->name));
2430 ++ strscpy(q->name, info->name, sizeof(q->name));
2431 + snd_use_lock_free(&q->use_lock);
2432 +
2433 + return 0;
2434 +@@ -1589,7 +1589,7 @@ static int snd_seq_ioctl_set_queue_info(struct snd_seq_client *client,
2435 + queuefree(q);
2436 + return -EPERM;
2437 + }
2438 +- strlcpy(q->name, info->name, sizeof(q->name));
2439 ++ strscpy(q->name, info->name, sizeof(q->name));
2440 + queuefree(q);
2441 +
2442 + return 0;
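
On the strlcpy() to strscpy() conversions above: strlcpy() returns strlen(src), which forces a scan of the whole source even past the destination size, while strscpy() copies at most size-1 bytes, always NUL-terminates, and returns the bytes copied or -E2BIG on truncation. A userspace model of those semantics (my_strscpy is an illustrative reimplementation, not the kernel's code):

    #include <stdio.h>

    #define E2BIG_LOCAL 7    /* stand-in for the kernel's E2BIG */

    static long my_strscpy(char *dst, const char *src, size_t size)
    {
            size_t i;

            if (size == 0)
                    return -E2BIG_LOCAL;
            for (i = 0; i < size - 1 && src[i]; i++)
                    dst[i] = src[i];    /* never reads past the first NUL */
            dst[i] = '\0';              /* always terminated */
            return src[i] ? -E2BIG_LOCAL : (long)i;
    }

    int main(void)
    {
            char name[8];
            long n = my_strscpy(name, "Queue-12345", sizeof(name));

            printf("dst=\"%s\" ret=%ld\n", name, n);    /* "Queue-1", -7 */
            return 0;
    }
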
2443 +diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
2444 +index a23d6a821ff3..6152ae24772b 100644
2445 +--- a/sound/soc/fsl/fsl_esai.c
2446 ++++ b/sound/soc/fsl/fsl_esai.c
2447 +@@ -58,6 +58,8 @@ struct fsl_esai {
2448 + u32 fifo_depth;
2449 + u32 slot_width;
2450 + u32 slots;
2451 ++ u32 tx_mask;
2452 ++ u32 rx_mask;
2453 + u32 hck_rate[2];
2454 + u32 sck_rate[2];
2455 + bool hck_dir[2];
2456 +@@ -358,21 +360,13 @@ static int fsl_esai_set_dai_tdm_slot(struct snd_soc_dai *dai, u32 tx_mask,
2457 + regmap_update_bits(esai_priv->regmap, REG_ESAI_TCCR,
2458 + ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(slots));
2459 +
2460 +- regmap_update_bits(esai_priv->regmap, REG_ESAI_TSMA,
2461 +- ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(tx_mask));
2462 +- regmap_update_bits(esai_priv->regmap, REG_ESAI_TSMB,
2463 +- ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(tx_mask));
2464 +-
2465 + regmap_update_bits(esai_priv->regmap, REG_ESAI_RCCR,
2466 + ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(slots));
2467 +
2468 +- regmap_update_bits(esai_priv->regmap, REG_ESAI_RSMA,
2469 +- ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(rx_mask));
2470 +- regmap_update_bits(esai_priv->regmap, REG_ESAI_RSMB,
2471 +- ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(rx_mask));
2472 +-
2473 + esai_priv->slot_width = slot_width;
2474 + esai_priv->slots = slots;
2475 ++ esai_priv->tx_mask = tx_mask;
2476 ++ esai_priv->rx_mask = rx_mask;
2477 +
2478 + return 0;
2479 + }
2480 +@@ -593,6 +587,7 @@ static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd,
2481 + bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
2482 + u8 i, channels = substream->runtime->channels;
2483 + u32 pins = DIV_ROUND_UP(channels, esai_priv->slots);
2484 ++ u32 mask;
2485 +
2486 + switch (cmd) {
2487 + case SNDRV_PCM_TRIGGER_START:
2488 +@@ -605,15 +600,38 @@ static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd,
2489 + for (i = 0; tx && i < channels; i++)
2490 + regmap_write(esai_priv->regmap, REG_ESAI_ETDR, 0x0);
2491 +
2492 ++ /*
2493 ++ * When set the TE/RE in the end of enablement flow, there
2494 ++ * will be channel swap issue for multi data line case.
2495 ++ * In order to workaround this issue, we switch the bit
2496 ++ * enablement sequence to below sequence
2497 ++ * 1) clear the xSMB & xSMA: which is done in probe and
2498 ++ * stop state.
2499 ++ * 2) set TE/RE
2500 ++ * 3) set xSMB
2501 ++ * 4) set xSMA: xSMA is the last one in this flow, which
2502 ++ * will trigger esai to start.
2503 ++ */
2504 + regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx),
2505 + tx ? ESAI_xCR_TE_MASK : ESAI_xCR_RE_MASK,
2506 + tx ? ESAI_xCR_TE(pins) : ESAI_xCR_RE(pins));
2507 ++ mask = tx ? esai_priv->tx_mask : esai_priv->rx_mask;
2508 ++
2509 ++ regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMB(tx),
2510 ++ ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(mask));
2511 ++ regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMA(tx),
2512 ++ ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(mask));
2513 ++
2514 + break;
2515 + case SNDRV_PCM_TRIGGER_SUSPEND:
2516 + case SNDRV_PCM_TRIGGER_STOP:
2517 + case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
2518 + regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx),
2519 + tx ? ESAI_xCR_TE_MASK : ESAI_xCR_RE_MASK, 0);
2520 ++ regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMA(tx),
2521 ++ ESAI_xSMA_xS_MASK, 0);
2522 ++ regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMB(tx),
2523 ++ ESAI_xSMB_xS_MASK, 0);
2524 +
2525 + /* Disable and reset FIFO */
2526 + regmap_update_bits(esai_priv->regmap, REG_ESAI_xFCR(tx),
2527 +@@ -903,6 +921,15 @@ static int fsl_esai_probe(struct platform_device *pdev)
2528 + return ret;
2529 + }
2530 +
2531 ++ esai_priv->tx_mask = 0xFFFFFFFF;
2532 ++ esai_priv->rx_mask = 0xFFFFFFFF;
2533 ++
2534 ++ /* Clear the TSMA, TSMB, RSMA, RSMB */
2535 ++ regmap_write(esai_priv->regmap, REG_ESAI_TSMA, 0);
2536 ++ regmap_write(esai_priv->regmap, REG_ESAI_TSMB, 0);
2537 ++ regmap_write(esai_priv->regmap, REG_ESAI_RSMA, 0);
2538 ++ regmap_write(esai_priv->regmap, REG_ESAI_RSMB, 0);
2539 ++
2540 + ret = devm_snd_soc_register_component(&pdev->dev, &fsl_esai_component,
2541 + &fsl_esai_dai, 1);
2542 + if (ret) {